My task is to receive an audio stream and play it. On the server side, the audio is encoded as PCM Int16 (16-bit, 44100 Hz sample rate, 2 channels).
I read the stream of bytes, convert it into an AVAudioPCMBuffer, and then pass it to the player in the playMusicFromBuffer function.
while (inputStream!.hasBytesAvailable) {
    let length = inputStream!.read(&buffer, maxLength: buffer.count)
    if (length > 0) {
        print("\(#file) > \(#function) > \(length) bytes read")
        print(buffer)
        let audioBuffer = bytesToAudioBuffer16(buffer)
        playMusicFromBuffer(PCMBuffer: audioBuffer)
    }
}
At first, for the conversion to AVAudioPCMBuffer, I used the bytesToAudioBuffer method below (its parameters do not match the server settings). The application starts and does not crash during the conversion, but there is still no sound.
func bytesToAudioBuffer(_ buf: [UInt8]) -> AVAudioPCMBuffer {
    let fmt = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100.0, channels: 1, interleaved: true)
    let frameLength = UInt32(buf.count) / (fmt?.streamDescription.pointee.mBytesPerFrame)!
    let audioBuffer = AVAudioPCMBuffer(pcmFormat: fmt!, frameCapacity: frameLength)
    audioBuffer!.frameLength = frameLength
    let dstLeft = audioBuffer?.floatChannelData![0]
    buf.withUnsafeBufferPointer {
        let src = UnsafeRawPointer($0.baseAddress!).bindMemory(to: Float.self, capacity: Int(frameLength))
        dstLeft!.initialize(from: src, count: Int(frameLength))
    }
    print("Convert to AVAudioPCMBuffer")
    return audioBuffer!
}
func bytesToAudioBuffer16(_ buf: [UInt8]) -> AVAudioPCMBuffer {
    let fmt = AVAudioFormat(commonFormat: .pcmFormatInt16, sampleRate: 44100.0, channels: 2, interleaved: true)
    let frameLength = UInt32(buf.count) / (fmt?.streamDescription.pointee.mBytesPerFrame)!
    let audioBuffer = AVAudioPCMBuffer(pcmFormat: fmt!, frameCapacity: frameLength)
    audioBuffer!.frameLength = frameLength
    let dstLeft = audioBuffer?.int16ChannelData![0]
    buf.withUnsafeBufferPointer {
        let src = UnsafeRawPointer($0.baseAddress!).bindMemory(to: Int16.self, capacity: Int(frameLength))
        dstLeft!.initialize(from: src, count: Int(frameLength))
    }
    print("Convert to AVAudioPCMBuffer")
    return audioBuffer!
}
var audioEngine: AVAudioEngine = AVAudioEngine()
var audioFilePlayer: AVAudioPlayerNode = AVAudioPlayerNode()

func playMusicFromBuffer(PCMBuffer: AVAudioPCMBuffer) {
    let mainMixer = audioEngine.mainMixerNode
    audioEngine.attach(audioFilePlayer)
    audioEngine.connect(audioFilePlayer, to: mainMixer, format: PCMBuffer.format)
    try? audioEngine.start()
    audioFilePlayer.play()
    audioFilePlayer.scheduleBuffer(PCMBuffer, at: nil, options: AVAudioPlayerNodeBufferOptions.loops)
}
Next, I tried to fix this by changing the settings and writing the bytesToAudioBuffer16 function. But now, when the application starts and I convert the stream to an AVAudioPCMBuffer and pass it to the player, an error occurs:
reason: __NSCFString "[[busArray objectAtIndexedSubscript:(NSUInteger)element] setFormat:format error:&nsErr]: returned false, error Error Domain=NSOSStatusErrorDomain Code=-10868 \"(null)\"" 0x0000600000d2e00
The question is how to fix this situation. Do I need to fix bytesToAudioBuffer16 or playMusicFromBuffer? If so, how?
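For reference, a minimal sketch of one possible conversion step (assuming the mixer wants the engine's standard deinterleaved Float32 format; the function name is illustrative and this is not a verified fix):

// Sketch: convert the interleaved Int16 buffer to the main mixer's Float32 format
// before scheduling it; the player would then be connected using targetFormat.
func convertToMixerFormat(_ source: AVAudioPCMBuffer,
                          engine: AVAudioEngine) -> AVAudioPCMBuffer? {
    let targetFormat = engine.mainMixerNode.outputFormat(forBus: 0)
    guard let converter = AVAudioConverter(from: source.format, to: targetFormat),
          let converted = AVAudioPCMBuffer(pcmFormat: targetFormat,
                                           frameCapacity: source.frameCapacity) else {
        return nil
    }
    var error: NSError?
    let status = converter.convert(to: converted, error: &error) { _, outStatus in
        outStatus.pointee = .haveData
        return source
    }
    return (status == .error || error != nil) ? nil : converted
}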
I have tried to create a spectrogram using this Apple tutorial, but it uses live audio input from the microphone. I want to create one from an existing file. I have tried to convert Apple's example from live input to existing files with no luck, so I am wondering if there are any better resources out there.
Here is how I am getting the samples:
let samples: (naturalTimeScale: Int32, data: [Float]) = {
    guard let samples = AudioUtilities.getAudioSamples(
        forResource: resource,
        withExtension: wExtension) else {
            fatalError("Unable to parse the audio resource.")
    }
    return samples
}()
// Returns an array of single-precision values for the specified audio resource.
static func getAudioSamples(forResource: String,
                            withExtension: String) -> (naturalTimeScale: CMTimeScale,
                                                       data: [Float])? {
    guard let path = Bundle.main.url(forResource: forResource,
                                     withExtension: withExtension) else {
        return nil
    }
    let asset = AVAsset(url: path.absoluteURL)
    guard
        let reader = try? AVAssetReader(asset: asset),
        let track = asset.tracks.first else {
            return nil
    }
    let outputSettings: [String: Int] = [
        AVFormatIDKey: Int(kAudioFormatLinearPCM),
        AVNumberOfChannelsKey: 1,
        AVLinearPCMIsBigEndianKey: 0,
        AVLinearPCMIsFloatKey: 1,
        AVLinearPCMBitDepthKey: 32,
        AVLinearPCMIsNonInterleaved: 1
    ]
    let output = AVAssetReaderTrackOutput(track: track,
                                          outputSettings: outputSettings)
    reader.add(output)
    reader.startReading()
    var samplesData = [Float]()
    while reader.status == .reading {
        if
            let sampleBuffer = output.copyNextSampleBuffer(),
            let dataBuffer = CMSampleBufferGetDataBuffer(sampleBuffer) {
                let bufferLength = CMBlockBufferGetDataLength(dataBuffer)
                var data = [Float](repeating: 0,
                                   count: bufferLength / 4)
                CMBlockBufferCopyDataBytes(dataBuffer,
                                           atOffset: 0,
                                           dataLength: bufferLength,
                                           destination: &data)
                samplesData.append(contentsOf: data)
        }
    }
    return (naturalTimeScale: track.naturalTimeScale, data: samplesData)
}
And here is how I am performing the "FFT", or DCT in this case:
static var sampleCount = 1024

let forwardDCT = vDSP.DCT(count: sampleCount,
                          transformType: .II)

guard let freqs = forwardDCT?.transform(samples.data) else { return }
This is the part where I begin to get lost/stuck in the Apple tutorial. How can I create the spectrogram from here?
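For what it's worth, my rough reading of the next step is to chop the samples into sampleCount-sized windows, run the DCT on each window, and convert the magnitudes to decibels, so that each window becomes one column of the spectrogram image. A sketch of that idea (not verified, and the zeroReference value just follows how I read the tutorial):

// Sketch: build one decibel row per sampleCount-sized window of the file.
var spectrogramRows = [[Float]]()
var position = 0
while position + sampleCount <= samples.data.count {
    let window = Array(samples.data[position ..< position + sampleCount])
    if let dctValues = forwardDCT?.transform(window) {
        // magnitude -> decibels; each row would become one column of the image
        let decibels = vDSP.amplitudeToDecibels(vDSP.absolute(dctValues),
                                                zeroReference: Float(sampleCount))
        spectrogramRows.append(decibels)
    }
    position += sampleCount
}
// spectrogramRows now holds (number of windows) x sampleCount values,
// ready to be mapped to gray/colour values in a CGImage or CALayer.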
[TLDR: Receiving an ASSERTION FAILURE on CABufferList.h (see the error at the bottom) when trying to save streamed audio data]
I am having trouble saving microphone audio that is streamed between devices using Multipeer Connectivity. So far I have two devices connected to each other using Multipeer Connectivity and have them sending messages and streams to each other.
Finally, I have the StreamDelegate method:
func stream(_ aStream: Stream, handle eventCode: Stream.Event) {
    // create a buffer for capturing the inputstream data
    let bufferSize = 2048
    let buffer = UnsafeMutablePointer<UInt8>.allocate(capacity: bufferSize)
    defer {
        buffer.deallocate()
    }
    var audioBuffer: AudioBuffer!
    var audioBufferList: AudioBufferList!

    switch eventCode {
    case .hasBytesAvailable:
        // if the input stream has bytes available
        // return the actual number of bytes placed in the buffer;
        let read = self.inputStream.read(buffer, maxLength: bufferSize)
        if read < 0 {
            // Stream error occurred
            print(self.inputStream.streamError!)
        } else if read == 0 {
            // EOF
            break
        }
        guard let mData = UnsafeMutableRawPointer(buffer) else { return }
        audioBuffer = AudioBuffer(mNumberChannels: 1, mDataByteSize: UInt32(read), mData: mData)
        audioBufferList = AudioBufferList(mNumberBuffers: 1, mBuffers: audioBuffer)
        let audioBufferListPointer = UnsafeMutablePointer<AudioBufferList>.allocate(capacity: read)
        audioBufferListPointer.pointee = audioBufferList
        DispatchQueue.main.async {
            if self.ezRecorder == nil {
                self.recordAudio()
            }
            self.ezRecorder?.appendData(from: audioBufferListPointer, withBufferSize: UInt32(read))
        }
        print("hasBytesAvailable \(audioBuffer!)")
    case .endEncountered:
        print("endEncountered")
        if self.inputStream != nil {
            self.inputStream.delegate = nil
            self.inputStream.remove(from: .current, forMode: .default)
            self.inputStream.close()
            self.inputStream = nil
        }
    case .errorOccurred:
        print("errorOccurred")
    case .hasSpaceAvailable:
        print("hasSpaceAvailable")
    case .openCompleted:
        print("openCompleted")
    default:
        break
    }
}
I am getting the stream of data; however, when I try to save it as an audio file using EZRecorder, I get the following error message:
[default] CABufferList.h:184 ASSERTION FAILURE [(nBytes <= buf->mDataByteSize) != 0 is false]:
I suspect the error could be arising when I create AudioStreamBasicDescription for EZRecorder.
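For context, the kind of AudioStreamBasicDescription I mean is along these lines (illustrative values for 32-bit float, mono, 44.1 kHz linear PCM; not my exact setup):

// Hypothetical ASBD for 32-bit float, mono, 44.1 kHz PCM; values are illustrative only.
var asbd = AudioStreamBasicDescription(
    mSampleRate: 44_100,
    mFormatID: kAudioFormatLinearPCM,
    mFormatFlags: kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked,
    mBytesPerPacket: 4,
    mFramesPerPacket: 1,
    mBytesPerFrame: 4,
    mChannelsPerFrame: 1,
    mBitsPerChannel: 32,
    mReserved: 0)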
I understand there may be other errors here and I appreciate any suggestions to solve the bug and improve the code. Thanks
EZAudio comes with TPCircularBuffer - use that.
Because writing the buffer to file is an async operation, this becomes a great use case for a circular buffer where we have one producer and one consumer.
Use the EZAudioUtilities where possible.
Update: EZRecorder's write expects bufferSize to be the number of frames to write, not the number of bytes.
So something like this should work:
class StreamDelegateInstance: NSObject {

    private static let MaxReadSize = 2048
    private static let BufferSize = MaxReadSize * 4

    private var availableReadBytesPtr = UnsafeMutablePointer<Int32>.allocate(capacity: 1)
    private var availableWriteBytesPtr = UnsafeMutablePointer<Int32>.allocate(capacity: 1)

    private var ezRecorder: EZRecorder?
    private var buffer = UnsafeMutablePointer<TPCircularBuffer>.allocate(capacity: 1)
    private var inputStream: InputStream?

    init(inputStream: InputStream? = nil) {
        self.inputStream = inputStream
        super.init()
        EZAudioUtilities.circularBuffer(buffer, withSize: Int32(StreamDelegateInstance.BufferSize))
        ensureWriteStream()
    }

    deinit {
        EZAudioUtilities.freeCircularBuffer(buffer)
        buffer.deallocate()
        availableReadBytesPtr.deallocate()
        availableWriteBytesPtr.deallocate()
        self.ezRecorder?.closeAudioFile()
        self.ezRecorder = nil
    }

    private func ensureWriteStream() {
        guard self.ezRecorder == nil else { return }

        // stores audio to temporary folder
        let audioOutputPath = NSTemporaryDirectory() + "audioOutput2.aiff"
        let audioOutputURL = URL(fileURLWithPath: audioOutputPath)
        print(audioOutputURL)

        // let audioStreamBasicDescription = AudioStreamBasicDescription(mSampleRate: 44100.0, mFormatID: kAudioFormatLinearPCM, mFormatFlags: kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked, mBytesPerPacket: 4, mFramesPerPacket: 1, mBytesPerFrame: 4, mChannelsPerFrame: 1, mBitsPerChannel: 32, mReserved: 1081729024)
        // EZAudioUtilities.audioBufferList(withNumberOfFrames: <#T##UInt32#>,
        //                                  numberOfChannels: 1,
        //                                  interleaved: true)

        // if you don't need a custom format, consider using EZAudioUtilities.m4AFormat
        let format = EZAudioUtilities.aiffFormat(withNumberOfChannels: 1,
                                                 sampleRate: 44800)
        self.ezRecorder = EZRecorder.init(url: audioOutputURL,
                                          clientFormat: format,
                                          fileType: .AIFF)
    }

    private func writeStream() {
        let ptr = TPCircularBufferTail(buffer, availableWriteBytesPtr)

        // ensure we have non 0 bytes to write - which should always be true, but you may want to refactor things
        guard availableWriteBytesPtr.pointee > 0 else { return }

        let framesToWrite = availableWriteBytesPtr.pointee / 4 // sizeof(float)
        let audioBuffer = AudioBuffer(mNumberChannels: 1,
                                      mDataByteSize: UInt32(availableWriteBytesPtr.pointee),
                                      mData: ptr)
        var audioBufferList = AudioBufferList(mNumberBuffers: 1, mBuffers: audioBuffer)
        self.ezRecorder?.appendData(from: &audioBufferList,
                                    withBufferSize: UInt32(framesToWrite))
        TPCircularBufferConsume(buffer, framesToWrite * 4)
    }
}
extension StreamDelegateInstance: StreamDelegate {
    func stream(_ aStream: Stream, handle eventCode: Stream.Event) {
        switch eventCode {
        case .hasBytesAvailable:
            // if the input stream has bytes available
            // return the actual number of bytes placed in the buffer;
            guard let ptr = TPCircularBufferHead(buffer, availableReadBytesPtr) else {
                print("couldn't get buffer ptr")
                break
            }
            let bytesToRead = min(Int(availableReadBytesPtr.pointee), StreamDelegateInstance.MaxReadSize)
            let mutablePtr = ptr.bindMemory(to: UInt8.self, capacity: bytesToRead)
            let bytesRead = self.inputStream?.read(mutablePtr,
                                                   maxLength: bytesToRead) ?? 0
            if bytesRead < 0 {
                // Stream error occurred
                print(self.inputStream?.streamError! ?? "No bytes read")
                break
            } else if bytesRead == 0 {
                // EOF
                break
            }
            TPCircularBufferProduce(buffer, Int32(bytesRead))
            DispatchQueue.main.async { [weak self] in
                self?.writeStream()
            }
        case .endEncountered:
            print("endEncountered")
            if self.inputStream != nil {
                self.inputStream?.delegate = nil
                self.inputStream?.remove(from: .current, forMode: .default)
                self.inputStream?.close()
                self.inputStream = nil
            }
        case .errorOccurred:
            print("errorOccurred")
        case .hasSpaceAvailable:
            print("hasSpaceAvailable")
        case .openCompleted:
            print("openCompleted")
        default:
            break
        }
    }
}
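As a usage sketch, this is how I would expect the class to be wired up to the incoming stream (the variable names here are hypothetical):

// Keep a strong reference to the delegate for as long as the stream is open.
let streamDelegate = StreamDelegateInstance(inputStream: inputStream)
inputStream.delegate = streamDelegate
inputStream.schedule(in: .current, forMode: .default)
inputStream.open()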
I want to use this code for a VoIP service.
I'm using a web socket and sending with it: let data = self.toNSData(PCMBuffer: buffer), and for playback: let audioBuffer = self.toPCMBuffer(data: data) on the other device.
I used https://github.com/Lkember/IntercomTest and it worked, but the amount of data is large. A 44100 Hz sample rate feels far too big for sending data, so I want to reduce the buffer size by lowering the rate to 8000.
But I do not know how to reduce the sample rate without getting an error!
My failing code is below:
@IBAction func start(_ sender: Any) {
    var engine = AVAudioEngine()
    let input = engine.inputNode
    let bus = 0
    let localAudioPlayer: AVAudioPlayerNode = AVAudioPlayerNode()
    let mixer = AVAudioMixerNode()
    let fmt = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 8000, channels: 1, interleaved: false)

    engine.attach(mixer)
    engine.connect(input, to: mixer, format: input.outputFormat(forBus: 0))
    mixer.volume = 0
    engine.connect(mixer, to: localAudioPlayer, format: fmt)

    localAudioPlayer.installTap(onBus: bus, bufferSize: 512, format: fmt) { (buffer, time) -> Void in
        // 8kHz buffers!
        print(buffer.format)
        localAudioPlayer.scheduleBuffer(buffer)
    }

    let data = self.toNSData(PCMBuffer: buffer)
    let audioBuffer = self.toPCMBuffer(data: data)
    localAudioPlayer.scheduleBuffer(audioBuffer)

    if (!localAudioPlayer.isPlaying) {
        localAudioPlayer.play()
        try! engine.start()
    }
}
func toNSData(PCMBuffer: AVAudioPCMBuffer) -> NSData {
    let channelCount = 1 // given PCMBuffer channel count is 1
    let channels = UnsafeBufferPointer(start: PCMBuffer.floatChannelData, count: channelCount)
    let ch0Data = NSData(bytes: channels[0], length: Int(PCMBuffer.frameCapacity * PCMBuffer.format.streamDescription.pointee.mBytesPerFrame))
    return ch0Data
}

func toPCMBuffer(data: NSData) -> AVAudioPCMBuffer {
    let audioFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatFloat32, sampleRate: 8000, channels: 1, interleaved: false) // given NSData audio format
    let PCMBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: UInt32(data.length) / audioFormat.streamDescription.pointee.mBytesPerFrame)
    PCMBuffer.frameLength = PCMBuffer.frameCapacity
    let channels = UnsafeBufferPointer(start: PCMBuffer.floatChannelData, count: Int(PCMBuffer.format.channelCount))
    data.getBytes(UnsafeMutableRawPointer(channels[0]), length: data.length)
    return PCMBuffer
}
You can use the code below to do the conversion you want.
let inputNode = audioEngine.inputNode
let downMixer = AVAudioMixerNode()
let main = audioEngine.mainMixerNode
let format = inputNode.inputFormat(forBus: 0)
let format16KHzMono = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatInt16, sampleRate: 8000, channels: 1, interleaved: true)

audioEngine.attach(downMixer)
downMixer.installTap(onBus: 0, bufferSize: 640, format: format16KHzMono) { (buffer, time) -> Void in
    do {
        print(buffer.description)
        if let channel1Buffer = buffer.int16ChannelData?[0] {
            // print(channel1Buffer[0])
            for i in 0 ... Int(buffer.frameLength - 1) {
                print(channel1Buffer[i])
            }
        }
    }
}

audioEngine.connect(inputNode, to: downMixer, format: format)
audioEngine.connect(downMixer, to: main, format: format16KHzMono)
audioEngine.prepare()
try! audioEngine.start()
In addition,
you can use the settings-based initializer instead of the commonFormat parameter:
let format16KHzMono = AVAudioFormat(settings: [AVFormatIDKey: AVAudioCommonFormat.pcmFormatInt16,
                                               AVEncoderAudioQualityKey: AVAudioQuality.high.rawValue,
                                               AVEncoderBitRateKey: 16,
                                               AVNumberOfChannelsKey: 1,
                                               AVSampleRateKey: 8000.0] as [String: AnyObject])
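For linear PCM specifically, my understanding is that the settings dictionary is normally built from AVFormatIDKey plus the AVLinearPCM keys rather than an AVAudioCommonFormat value; a sketch of that variant (not verified against the code above):

// Sketch of a settings dictionary for 16-bit, 8 kHz, mono linear PCM.
let pcm8kMonoSettings: [String: Any] = [
    AVFormatIDKey: kAudioFormatLinearPCM,
    AVSampleRateKey: 8000.0,
    AVNumberOfChannelsKey: 1,
    AVLinearPCMBitDepthKey: 16,
    AVLinearPCMIsFloatKey: false,
    AVLinearPCMIsBigEndianKey: false,
    AVLinearPCMIsNonInterleaved: false
]
let pcm8kMonoFormat = AVAudioFormat(settings: pcm8kMonoSettings) // returns nil if the settings are rejected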
I am trying to convert a given AVAudioPCMBuffer (44.1 kHz, 1 ch, float32, not interleaved) to another AVAudioPCMBuffer (16 kHz, 1 ch, int16, not interleaved) using AVAudioConverter, and write it using AVAudioFile.
My code uses the AudioKit library together with the AKLazyTap tap to get a buffer at regular intervals, based on this source:
https://github.com/AudioKit/AudioKit/tree/master/AudioKit/Common/Taps/Lazy%20Tap
Here is my implementation:
lazy var downAudioFormat: AVAudioFormat = {
    let avAudioChannelLayout = AVAudioChannelLayout(layoutTag: kAudioChannelLayoutTag_Mono)!
    return AVAudioFormat(
        commonFormat: .pcmFormatInt16,
        sampleRate: 16000,
        interleaved: false,
        channelLayout: avAudioChannelLayout)
}()

//...

AKSettings.sampleRate = 44100
AKSettings.numberOfChannels = AVAudioChannelCount(1)
AKSettings.ioBufferDuration = 0.002
AKSettings.defaultToSpeaker = true

//...

let mic = AKMicrophone()

let originalAudioFormat: AVAudioFormat = mic.avAudioNode.outputFormat(forBus: 0) // 44,100 Hz, 1ch, float32...
let inputFrameCapacity = AVAudioFrameCount(1024)

// I don't think this is correct, the audio is getting chopped...
// How do I calculate it correctly?
let outputFrameCapacity = AVAudioFrameCount(512)

guard let inputBuffer = AVAudioPCMBuffer(
    pcmFormat: originalAudioFormat,
    frameCapacity: inputFrameCapacity) else {
        fatalError()
}

// Your timer should fire equal to or faster than your buffer duration
bufferTimer = Timer.scheduledTimer(
    withTimeInterval: AKSettings.ioBufferDuration / 2,
    repeats: true) { [weak self] _ in

    guard let unwrappedSelf = self else {
        return
    }

    unwrappedSelf.lazyTap?.fillNextBuffer(inputBuffer, timeStamp: nil)

    // This is important: since we're polling for samples, sometimes
    // the buffer is empty, and sometimes it will be double what it was the last call.
    if inputBuffer.frameLength == 0 {
        return
    }

    // This converter is only created once, as is the AVAudioFile. Ignore this code, I call a function instead.
    let converter = AVAudioConverter(from: originalAudioFormat, to: downAudioFormat)
    converter.sampleRateConverterAlgorithm = AVSampleRateConverterAlgorithm_Normal
    converter.sampleRateConverterQuality = .min
    converter.bitRateStrategy = AVAudioBitRateStrategy_Constant

    guard let outputBuffer = AVAudioPCMBuffer(
        pcmFormat: converter.outputFormat,
        frameCapacity: outputFrameCapacity) else {
            print("Failed to create new buffer")
            return
    }

    let inputBlock: AVAudioConverterInputBlock = { inNumPackets, outStatus in
        outStatus.pointee = AVAudioConverterInputStatus.haveData
        return inputBuffer
    }

    var error: NSError?
    let status: AVAudioConverterOutputStatus = converter.convert(
        to: outputBuffer,
        error: &error,
        withInputFrom: inputBlock)

    switch status {
    case .error:
        if let unwrappedError: NSError = error {
            print(unwrappedError)
        }
        return
    default: break
    }

    // Only created once; instead of this code, my code uses a function to verify whether the AVAudioFile has been created. Ignore it.
    outputAVAudioFile = try AVAudioFile(
        forWriting: unwrappedCacheFilePath,
        settings: format.settings,
        commonFormat: format.commonFormat,
        interleaved: false)

    do {
        try outputAVAudioFile?.write(from: avAudioPCMBuffer)
    } catch {
        print(error)
    }
}
(Please note that the AVAudioConverter and AVAudioFile are reused; the initializations shown here don't represent the real implementation in my code, they are only there to simplify it and make it easier to understand.)
With the frameCapacity of the outputBuffer (AVAudioPCMBuffer) set to 512, the audio gets chopped. Is there any way to discover the correct frameCapacity for this buffer?
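For reference, my back-of-the-envelope estimate of the relationship involved (an assumption on my part, not something I found in the AVAudioConverter docs):

// Rough estimate of the frames produced by the sample rate conversion (assumption only).
let inputFrames = 1024.0
let inputRate = 44_100.0
let outputRate = 16_000.0
let estimatedOutputFrames = inputFrames * outputRate / inputRate // ≈ 371.5
// So 512 should be enough per 1024-frame input, which makes me suspect the chopping
// happens when the polled buffer comes back with more than 1024 frames.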
Written using Swift 4 and AudioKit 4.1.
Many thanks!
I managed to solve this problem by installing a tap on the inputNode, like this:
lazy var downAudioFormat: AVAudioFormat = {
    let avAudioChannelLayout = AVAudioChannelLayout(layoutTag: kAudioChannelLayoutTag_Mono)!
    return AVAudioFormat(
        commonFormat: .pcmFormatInt16,
        sampleRate: SAMPLE_RATE,
        interleaved: true,
        channelLayout: avAudioChannelLayout)
}()

private func addBufferListener(_ avAudioNode: AVAudioNode) {

    let originalAudioFormat: AVAudioFormat = avAudioNode.inputFormat(forBus: 0)
    let downSampleRate: Double = downAudioFormat.sampleRate
    let ratio: Float = Float(originalAudioFormat.sampleRate) / Float(downSampleRate)
    let converter: AVAudioConverter = buildConverter(originalAudioFormat)

    avAudioNode.installTap(
        onBus: 0,
        bufferSize: AVAudioFrameCount(downSampleRate * 2),
        format: originalAudioFormat,
        block: { (buffer: AVAudioPCMBuffer!, _: AVAudioTime!) -> Void in

            let capacity = UInt32(Float(buffer.frameCapacity) / ratio)

            guard let outputBuffer = AVAudioPCMBuffer(
                pcmFormat: self.downAudioFormat,
                frameCapacity: capacity) else {
                    print("Failed to create new buffer")
                    return
            }

            let inputBlock: AVAudioConverterInputBlock = { inNumPackets, outStatus in
                outStatus.pointee = AVAudioConverterInputStatus.haveData
                return buffer
            }

            var error: NSError?
            let status: AVAudioConverterOutputStatus = converter.convert(
                to: outputBuffer,
                error: &error,
                withInputFrom: inputBlock)

            switch status {
            case .error:
                if let unwrappedError: NSError = error {
                    print("Error \(unwrappedError)")
                }
                return
            default: break
            }

            self.delegate?.flushAudioBuffer(outputBuffer)
        })
}
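(buildConverter isn't shown above; presumably it just creates a single AVAudioConverter from the tap's format to downAudioFormat and reuses it, roughly like this sketch:)

// Hypothetical sketch of the buildConverter(_:) helper referenced above.
private func buildConverter(_ inputFormat: AVAudioFormat) -> AVAudioConverter {
    guard let converter = AVAudioConverter(from: inputFormat, to: downAudioFormat) else {
        fatalError("Unsupported conversion: \(inputFormat) -> \(downAudioFormat)")
    }
    return converter
}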
I would like to use AVAudioEngine for a 3D audio effect where the sound source circles the user's head. The source appears to move from left to right, but I've been unable to figure out how to make it circle the user's head. The audio source must be mono or it can't work.
I don't understand AVAudioMake3DVectorOrientation and AVAudioMake3DAngularOrientation.
I thought my math was correct, but I suspect that if it were, I would have gotten the results I was looking for.
This is bare bones, so there isn't much error checking.
Would someone provide guidance to get me on track?
Thank you,
W.
import AVFoundation

class ThreeDAudio {

    var _angleIndx = 0.0
    var _engine: AVAudioEngine!
    var _player: AVAudioPlayerNode!
    var _environment: AVAudioEnvironmentNode!
    var _circleTimer: Timer!

    func initTimer() {
        _circleTimer = Timer.scheduledTimer(timeInterval: 0.10, target: self,
                                            selector: #selector(ThreeDAudio.updatePosition), userInfo: nil, repeats: true)
    }

    @objc func updatePosition() {
        let centerX = 0.0
        let centerY = 0.0
        let radius = 10.0
        let degToRads = Double.pi / 180.0
        let angle = _angleIndx * degToRads
        let x = centerX + sin(angle) * radius
        let y = centerY + cos(angle) * radius
        let z = 0.0
        let posInSpace = AVAudioMake3DPoint(Float(x), Float(y), Float(z))
        _angleIndx += 1.0
        _player.position = posInSpace
        print("angle: \(_angleIndx) , \(posInSpace)")
        if _angleIndx == 360.0 { _circleTimer.invalidate() }
    }

    func getBufferFromFileInBundle(fileName: String, fileType: String) -> AVAudioPCMBuffer? {
        // audio MUST be a monaural source or it can't work in 3D
        var file: AVAudioFile
        var audioBuffer: AVAudioPCMBuffer? = nil
        let path = Bundle.main.path(forResource: fileName, ofType: fileType)!
        do {
            file = try AVAudioFile(forReading: URL(fileURLWithPath: path))
            audioBuffer = AVAudioPCMBuffer(pcmFormat: file.processingFormat, frameCapacity: AVAudioFrameCount(file.length))
            try file.read(into: audioBuffer!)
        } catch let error as NSError {
            print("Error AVAudioFile:\(error)")
        }
        return audioBuffer
    }

    func outputFormat() -> AVAudioFormat {
        let outputFormat = _engine.outputNode.outputFormat(forBus: 0)
        let nChannels = outputFormat.channelCount // testing, will always be 2 channels
        let sampleRate = outputFormat.sampleRate
        return AVAudioFormat(standardFormatWithSampleRate: sampleRate, channels: nChannels)
    }

    func setupEngine(_ audioBuffer: AVAudioPCMBuffer) {
        _engine = AVAudioEngine()
        _player = AVAudioPlayerNode()
        _environment = AVAudioEnvironmentNode()

        _player.renderingAlgorithm = .HRTF

        _engine.attach(_player)
        _engine.attach(_environment)

        _engine.connect(_player, to: _environment, format: audioBuffer.format)
        _engine.connect(_environment, to: _engine.outputNode, format: outputFormat())

        _environment.listenerPosition = AVAudioMake3DPoint(0.0, 0.0, 0.0)
        _environment.listenerVectorOrientation = AVAudioMake3DVectorOrientation(AVAudioMake3DVector(0, 0, -1), AVAudioMake3DVector(0, 0, 0))
        _environment.listenerAngularOrientation = AVAudioMake3DAngularOrientation(0.0, 0.0, 0.0)

        do {
            try _engine.start()
        } catch let error as NSError {
            print("Error start:\(error)")
        }
    }

    func startAudioTest() {
        var thisFile = (name: "", fileType: "")
        thisFile = (name: "sound_voices", fileType: "wav")
        thisFile = (name: "Bouncing-Ball-MONO", fileType: "aiff")
        let audioBuffer = getBufferFromFileInBundle(fileName: thisFile.name, fileType: thisFile.fileType)
        if audioBuffer != nil {
            setupEngine(audioBuffer!)
            initTimer()
            _player.scheduleBuffer(audioBuffer!, at: nil, options: .loops, completionHandler: nil)
            _player.play()
        }
    }
}
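For reference, my current understanding of the listener orientation types (taken from the AVAudioEnvironmentNode documentation, not verified in this project): the vector form takes a forward vector (where the listener faces) and an up vector (out of the top of the head), and the angular form expresses the same thing as yaw/pitch/roll in degrees.

// Default listener orientation as I understand it: facing down the negative z axis.
let defaultVectorOrientation = AVAudioMake3DVectorOrientation(
    AVAudioMake3DVector(0, 0, -1),   // forward
    AVAudioMake3DVector(0, 1, 0))    // up
// The equivalent angular orientation is AVAudioMake3DAngularOrientation(0, 0, 0), i.e. yaw, pitch, roll.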