AVAudioPlayerNode jump to a specific time of the audio file - swift

I found similar questions here, here, and here, but none of the answers have solved the problem for me. Simply put, the audio does not jump to the requested time but instead starts again from the beginning.
func seekTo(time: Double) {
    player.stop()
    let startSample = Double(time * audioSampleRate)
    let lengthSamples: AVAudioFramePosition = AVAudioFramePosition(Double(audioLengthSamples) - startSample)
    let frameCount = AVAudioFrameCount(audioLengthSamples - lengthSamples)
    if currentPosition < audioLengthSamples {
        player.scheduleSegment(audioFile!, startingFrame: AVAudioFramePosition(startSample), frameCount: AVAudioFrameCount(frameCount), at: nil, completionHandler: nil)
        let wasPlaying = player.isPlaying
        if wasPlaying {
            player.play()
        }
    }
}
The time parameter carries the specific time to which the audio should jump.
Any help?
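For reference, seek helpers built on scheduleSegment usually capture the playing state before calling stop() (stop() resets isPlaying to false) and then schedule only the frames remaining after the target position. A sketch, assuming audioLengthSamples is an AVAudioFramePosition and audioSampleRate is the file's sample rate:

func seek(to time: Double) {
    guard let audioFile = audioFile else { return }

    // Remember whether we were playing *before* stopping the node.
    let wasPlaying = player.isPlaying
    player.stop()

    // First frame to play, and the number of frames left until the end of the file.
    let startFrame = AVAudioFramePosition(time * audioSampleRate)
    guard startFrame < audioLengthSamples else { return }
    let remainingFrames = AVAudioFrameCount(audioLengthSamples - startFrame)

    player.scheduleSegment(audioFile,
                           startingFrame: startFrame,
                           frameCount: remainingFrames,
                           at: nil,
                           completionHandler: nil)

    if wasPlaying {
        player.play()
    }
}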

Related

Is there a way to create a spectrogram of an audio file using Swift and AudioKit?

I am trying to create a spectrogram, like the one in the image, from an audio file using Swift for a macOS app. I am using AppKit but could implement SwiftUI as well. I came across AudioKit and it seems like the perfect library for this kind of thing, but I have not been able to find any examples of what I am looking for in any of the AudioKit repositories: AudioKit, AudioKitUI, nor the Cookbook. Is this something that is possible with AudioKit? If so, can anyone help me with this?
Thanks so much!
I have previously tried using Apple's example project and changed the code in the AudioSpectrogram + AVCaptureAudioDataOutputSampleBufferDelegate file. The original code is as follows:
extension AudioSpectrogram: AVCaptureAudioDataOutputSampleBufferDelegate {

    public func captureOutput(_ output: AVCaptureOutput,
                              didOutput sampleBuffer: CMSampleBuffer,
                              from connection: AVCaptureConnection) {

        var audioBufferList = AudioBufferList()
        var blockBuffer: CMBlockBuffer?

        CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
            sampleBuffer,
            bufferListSizeNeededOut: nil,
            bufferListOut: &audioBufferList,
            bufferListSize: MemoryLayout.stride(ofValue: audioBufferList),
            blockBufferAllocator: nil,
            blockBufferMemoryAllocator: nil,
            flags: kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment,
            blockBufferOut: &blockBuffer)

        guard let data = audioBufferList.mBuffers.mData else {
            return
        }

        /// The _Nyquist frequency_ is the highest frequency that a sampled system can properly
        /// reproduce and is half the sampling rate of such a system. Although this app doesn't use
        /// `nyquistFrequency`, you may find this code useful to add an overlay to the user interface.
        if nyquistFrequency == nil {
            let duration = Float(CMSampleBufferGetDuration(sampleBuffer).value)
            let timescale = Float(CMSampleBufferGetDuration(sampleBuffer).timescale)
            let numsamples = Float(CMSampleBufferGetNumSamples(sampleBuffer))
            nyquistFrequency = 0.5 / (duration / timescale / numsamples)
        }

        if self.rawAudioData.count < AudioSpectrogram.sampleCount * 2 {
            let actualSampleCount = CMSampleBufferGetNumSamples(sampleBuffer)

            let ptr = data.bindMemory(to: Int16.self, capacity: actualSampleCount)
            let buf = UnsafeBufferPointer(start: ptr, count: actualSampleCount)

            rawAudioData.append(contentsOf: Array(buf))
        }

        while self.rawAudioData.count >= AudioSpectrogram.sampleCount {
            let dataToProcess = Array(self.rawAudioData[0 ..< AudioSpectrogram.sampleCount])
            self.rawAudioData.removeFirst(AudioSpectrogram.hopCount)
            self.processData(values: dataToProcess)
        }

        createAudioSpectrogram()
    }

    func configureCaptureSession() {
        // Also note that:
        //
        // When running in iOS, you must add a "Privacy - Microphone Usage
        // Description" entry.
        //
        // When running in macOS, you must add a "Privacy - Microphone Usage
        // Description" entry to `Info.plist`, and check "audio input" and
        // "camera access" under the "Resource Access" category of "Hardened
        // Runtime".
        switch AVCaptureDevice.authorizationStatus(for: .audio) {
        case .authorized:
            break
        case .notDetermined:
            sessionQueue.suspend()
            AVCaptureDevice.requestAccess(for: .audio,
                                          completionHandler: { granted in
                if !granted {
                    fatalError("App requires microphone access.")
                } else {
                    self.configureCaptureSession()
                    self.sessionQueue.resume()
                }
            })
            return
        default:
            // Users can add authorization in "Settings > Privacy > Microphone"
            // on an iOS device, or "System Preferences > Security & Privacy >
            // Microphone" on a macOS device.
            fatalError("App requires microphone access.")
        }

        captureSession.beginConfiguration()

        #if os(macOS)
        // Note that in macOS, you can change the sample rate, for example to
        // `AVSampleRateKey: 22050`. This reduces the Nyquist frequency and
        // increases the resolution at lower frequencies.
        audioOutput.audioSettings = [
            AVFormatIDKey: kAudioFormatLinearPCM,
            AVLinearPCMIsFloatKey: false,
            AVLinearPCMBitDepthKey: 16,
            AVNumberOfChannelsKey: 1]
        #endif

        if captureSession.canAddOutput(audioOutput) {
            captureSession.addOutput(audioOutput)
        } else {
            fatalError("Can't add `audioOutput`.")
        }

        guard
            let microphone = AVCaptureDevice.default(.builtInMicrophone,
                                                     for: .audio,
                                                     position: .unspecified),
            let microphoneInput = try? AVCaptureDeviceInput(device: microphone) else {
                fatalError("Can't create microphone.")
        }

        if captureSession.canAddInput(microphoneInput) {
            captureSession.addInput(microphoneInput)
        }

        captureSession.commitConfiguration()
    }

    /// Starts the audio spectrogram.
    func startRunning() {
        sessionQueue.async {
            if AVCaptureDevice.authorizationStatus(for: .audio) == .authorized {
                self.captureSession.startRunning()
            }
        }
    }
}
I got rid of the configureCaptureSession function and replaced the rest of the code to get the following code:
public func captureBuffer() {
    var samplesArray: [Int16] = []

    let asset = AVAsset(url: audioFileUrl)
    let reader = try! AVAssetReader(asset: asset)
    let track = asset.tracks(withMediaType: AVMediaType.audio)[0]
    let settings = [
        AVFormatIDKey: kAudioFormatLinearPCM
    ]

    let readerOutput = AVAssetReaderTrackOutput(track: track, outputSettings: settings)
    reader.add(readerOutput)
    reader.startReading()

    while let buffer = readerOutput.copyNextSampleBuffer() {
        var audioBufferList = AudioBufferList(mNumberBuffers: 1, mBuffers: AudioBuffer(mNumberChannels: 1, mDataByteSize: 0, mData: nil))
        var blockBuffer: CMBlockBuffer?

        CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
            buffer,
            bufferListSizeNeededOut: nil,
            bufferListOut: &audioBufferList,
            bufferListSize: MemoryLayout<AudioBufferList>.size,
            blockBufferAllocator: nil,
            blockBufferMemoryAllocator: nil,
            flags: kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment,
            blockBufferOut: &blockBuffer
        )

        let buffers = UnsafeBufferPointer<AudioBuffer>(start: &audioBufferList.mBuffers, count: Int(audioBufferList.mNumberBuffers))

        for buffer in buffers {
            let samplesCount = Int(buffer.mDataByteSize) / MemoryLayout<Int16>.size
            let samplesPointer = audioBufferList.mBuffers.mData!.bindMemory(to: Int16.self, capacity: samplesCount)
            let samples = UnsafeMutableBufferPointer<Int16>(start: samplesPointer, count: samplesCount)

            for sample in samples {
                // do something with your sample (which is an Int16 amplitude value)
                samplesArray.append(sample)
            }
        }

        guard let data = audioBufferList.mBuffers.mData else {
            return
        }

        /// The _Nyquist frequency_ is the highest frequency that a sampled system can properly
        /// reproduce and is half the sampling rate of such a system. Although this app doesn't use
        /// `nyquistFrequency`, you may find this code useful to add an overlay to the user interface.
        if nyquistFrequency == nil {
            let duration = Float(CMSampleBufferGetDuration(buffer).value)
            let timescale = Float(CMSampleBufferGetDuration(buffer).timescale)
            let numsamples = Float(CMSampleBufferGetNumSamples(buffer))
            nyquistFrequency = 0.5 / (duration / timescale / numsamples)
        }

        if self.rawAudioData.count < AudioSpectrogram.sampleCount * 2 {
            let actualSampleCount = CMSampleBufferGetNumSamples(buffer)

            let ptr = data.bindMemory(to: Int16.self, capacity: actualSampleCount)
            let buf = UnsafeBufferPointer(start: ptr, count: actualSampleCount)

            rawAudioData.append(contentsOf: Array(buf))
        }

        while self.rawAudioData.count >= AudioSpectrogram.sampleCount {
            let dataToProcess = Array(self.rawAudioData[0 ..< AudioSpectrogram.sampleCount])
            self.rawAudioData.removeFirst(AudioSpectrogram.hopCount)
            self.processData(values: dataToProcess)
        }

        createAudioSpectrogram()
    }
}
In the AudioSpectrogram: CALayer file, I changed the original lines 10-30 from
public class AudioSpectrogram: CALayer {
    // MARK: Initialization
    override init() {
        super.init()

        contentsGravity = .resize

        configureCaptureSession()
        audioOutput.setSampleBufferDelegate(self,
                                            queue: captureQueue)
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override public init(layer: Any) {
        super.init(layer: layer)
    }
to the following:
public class AudioSpectrogram: CALayer {
    @objc var audioFileUrl: URL

    // MARK: Initialization
    override init() {
        self.audioFileUrl = selectedTrackUrl!

        super.init()

        contentsGravity = .resize

        captureBuffer()
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override public init(layer: Any) {
        self.audioFileUrl = selectedTrackUrl!
        super.init(layer: layer)
    }
The changed code allows me to specify the audio file to use when the Spectrogram is called from another area in my app.
The following is an example of what I am trying to achieve. It was done using FFMPEG.
Example Spectrogram
This is the output I get from my code:
Output Image
AudioKit is not the tool you want for this. You want AVFoundation. Apple has an example project of exactly what you're describing.
The tool at the heart of this is a DCT (discrete cosine transform) to convert windows of samples into a collection of component frequencies you can visualize. AVFoundation is the tool you use to turn your audio file or live recording into a buffer of audio samples so you can apply the DCT.
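To illustrate the shape of that pipeline (this is only a sketch, not the asker's project code: the function name, the 1024-sample window, and the lack of overlap or windowing are my own simplifications), you can read PCM samples with AVAudioFile and run each window through a DCT with Accelerate:

import AVFoundation
import Accelerate

/// Returns one array of DCT magnitudes per 1024-sample window of the file.
func spectrogramColumns(for url: URL) throws -> [[Float]] {
    // Read the whole file as Float32, non-interleaved PCM.
    let file = try AVAudioFile(forReading: url)
    let buffer = AVAudioPCMBuffer(pcmFormat: file.processingFormat,
                                  frameCapacity: AVAudioFrameCount(file.length))!
    try file.read(into: buffer)
    let samples = Array(UnsafeBufferPointer(start: buffer.floatChannelData![0],
                                            count: Int(buffer.frameLength)))

    // Transform consecutive windows into frequency-domain columns.
    let windowSize = 1024
    guard let dct = vDSP.DCT(count: windowSize, transformType: .II) else { return [] }

    var columns: [[Float]] = []
    for start in stride(from: 0, through: samples.count - windowSize, by: windowSize) {
        let window = Array(samples[start ..< start + windowSize])
        // The magnitude of each coefficient becomes one pixel of a spectrogram column.
        columns.append(vDSP.absolute(dct.transform(window)))
    }
    return columns
}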
There actually is a Spectrogram in the AudioKitUI Swift package: https://github.com/AudioKit/AudioKitUI/blob/main/Sources/AudioKitUI/Visualizations/SpectrogramView.swift
You would need to pass it an AudioKit Node but it should be interchangeable with the other visualizers in the Cookbook.
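For the AudioKitUI route, the wiring could look roughly like the sketch below. Treat it as a sketch only: the SpectrogramView initializer label and the AudioPlayer loading call are assumptions that should be checked against your AudioKit/AudioKitUI versions, and fileURL is a placeholder for your audio file.

import SwiftUI
import AudioKit
import AudioKitUI

final class SpectrogramConductor {
    let engine = AudioEngine()
    let player = AudioPlayer()

    init(fileURL: URL) {
        engine.output = player
        // Load the file to visualize; check the AudioPlayer API of your AudioKit version.
        try? player.load(url: fileURL)
    }

    func start() {
        try? engine.start()
        player.play()
    }
}

struct FileSpectrogramView: View {
    let conductor: SpectrogramConductor

    var body: some View {
        // The answer above says to pass an AudioKit Node; the `node:` label is an
        // assumption, so verify it against the linked SpectrogramView.swift.
        SpectrogramView(node: conductor.player)
            .onAppear { conductor.start() }
    }
}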

How to do offset on AVAudioPCMBuffer?

I have an AVAudioPCMBuffer that I set as the source for an AVAudioPlayerNode:
internal func play() {
    guard let buf = pcmBuf else {
        logger?.log(severity: .error, msg: "Sound pcmBuf is nil.")
        return
    }

    if !isPlaying {
        player.scheduleBuffer(buf, at: nil, options: .loops)
        player.play()
    }
}
It is possible that the user seeks forward; to handle that, I need to offset the buffer and set it again, like this:
internal func play() {
    guard let buf = pcmBuf else {
        logger?.log(severity: .error, msg: "Sound pcmBuf is nil.")
        return
    }

    buf.offset = 10 // Eg: in sec

    if !isPlaying {
        player.scheduleBuffer(buf, at: nil, options: .loops)
        player.play()
    }
}
So, it will look like I set a new buffer, but with the needed offset, and the playback will start from the required point.
The problem is that there is no offset method...
How to do it?
I don't think you need to offset the buffer. In your play function, you should pass scheduleBuffer a valid second (at:) parameter instead of nil.
https://developer.apple.com/documentation/avfaudio/avaudioplayernode/1388422-schedulebuffer
"Schedules the playing samples from an audio buffer at the time and playback options you specify."
So pass in a valid AVAudioTime.
https://developer.apple.com/documentation/avfaudio/avaudioplayernode#1669195
init(sampleTime: AVAudioFramePosition,
     atRate sampleRate: Double)
I am guessing this is probably the initialiser you want.
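A minimal sketch of using that initialiser with scheduleBuffer (the buffer's own sample rate and the 10-second value are illustrative assumptions, not code from the question):

// Schedule the buffer to begin playing at a specific point on the player's
// sample timeline, built with the initialiser mentioned above.
let sampleRate = buf.format.sampleRate
let startTime = AVAudioTime(sampleTime: AVAudioFramePosition(10.0 * sampleRate), // hypothetical offset
                            atRate: sampleRate)
player.scheduleBuffer(buf, at: startTime, options: .loops)
player.play()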

How to play looping compressed soundtrack without using to much ram?

Right now I'm using AVAudioEngine, with AVAudioPlayer, AVAudioFile and AVAudioPCMBuffer, to play a couple of compressed soundtracks (m4a). My problem is that if a soundtrack is 40 MB uncompressed and 1.8 MB as m4a, then when I load the sound into the buffer the memory usage jumps by 40 MB (the uncompressed size of the file). How can I optimise that to use as little memory as possible?
Thanks.
let loopingBuffer: AVAudioPCMBuffer!
do {
    let loopingFile = try AVAudioFile(forReading: fileURL)
    loopingBuffer = AVAudioPCMBuffer(pcmFormat: loopingFile.processingFormat, frameCapacity: UInt32(loopingFile.length))!
    do {
        try loopingFile.read(into: loopingBuffer)
    } catch {
        print(error)
    }
} catch {
    print(error)
}

// player is AVAudioPlayerNode
player.scheduleBuffer(loopingBuffer, at: nil, options: [.loops])
Well, as a workaround, I decided to create a wrapper that splits the audio into chunks of a few seconds and plays and buffers them one at a time into the AVAudioPlayerNode.
As a result, only a few seconds are in RAM (twice that while buffering) at any time.
It brought the memory usage for my use case from 350 MB down to less than 50 MB.
Here is the code; don't hesitate to use it or improve it (it's a first version). Any comments are welcome!
import Foundation
import AVFoundation

public class AVAudioStreamPCMPlayerWrapper
{
    public var player: AVAudioPlayerNode
    public let audioFile: AVAudioFile
    public let bufferSize: TimeInterval
    public let url: URL

    public private(set) var loopingCount: Int = 0

    /// Equal to the repeatingTimes passed in the initialiser.
    public let numberOfLoops: Int

    /// The time passed in the initialisation parameter for which the player will preload the next buffer to have a smooth transition.
    /// The default value is 1 s.
    /// Note: better not to go under 1 s, since the buffering mechanism is triggered with only relative precision.
    public let preloadTime: TimeInterval

    public private(set) var scheduled: Bool = false

    private let framePerBuffer: AVAudioFrameCount

    /// To identify the schedule cycle we are executing.
    /// Since the thread work can't be stopped once it is scheduled,
    /// we need to be sure that the execution of the work is done for the current playing cycle.
    /// For example, if the player has been stopped and restarted before the async call has executed.
    private var scheduledId: Int = 0

    /// The time since the track started.
    private var startingDate: Date = Date()

    /// The date used to measure the difference between the moment the buffering should have occurred and the moment it actually did.
    /// Hence, we can adjust the next trigger of the buffering time to prevent the delay from accumulating.
    private var lastBufferingDate = Date()

    /// This class allows us to play a sound, once or multiple times, without overloading the RAM.
    /// Instead of loading the full sound into memory it only reads a segment of it at a time, preloading the next segment to avoid stutter.
    /// - Parameters:
    ///   - url: The URL of the sound to be played.
    ///   - bufferSize: The size of the segment of the sound being played. Must be greater than preloadTime.
    ///   - repeatingTimes: How many times the sound must loop (0: it's played only once; 1: it's played twice, i.e. repeated once;
    ///     -1: repeated indefinitely).
    ///   - preloadTime: 1 should be the minimum value, since the preloading mechanism may not be triggered precisely on time.
    /// - Throws: Throws the error the AVAudioFile would throw if it couldn't be created with the URL passed in parameter.
    public init(url: URL, bufferSize: TimeInterval, isLooping: Bool, repeatingTimes: Int = -1, preloadTime: TimeInterval = 1) throws
    {
        self.url = url
        self.player = AVAudioPlayerNode()
        self.bufferSize = bufferSize
        self.numberOfLoops = repeatingTimes
        self.preloadTime = preloadTime

        try self.audioFile = AVAudioFile(forReading: url)
        framePerBuffer = AVAudioFrameCount(audioFile.fileFormat.sampleRate * bufferSize)
    }

    public func scheduleBuffer()
    {
        scheduled = true
        scheduledId += 1
        scheduleNextBuffer(offset: preloadTime)
    }

    public func play()
    {
        player.play()
        startingDate = Date()
        scheduleNextBuffer(offset: preloadTime)
    }

    public func stop()
    {
        reset()
        scheduleBuffer()
    }

    public func reset()
    {
        player.stop()
        player.reset()
        scheduled = false
        audioFile.framePosition = 0
    }

    /// The first time this method is called the timer is offset by the preload time; then, since the timer is repeating and has already been offset,
    /// we don't need to offset it again on the second call.
    private func scheduleNextBuffer(offset: TimeInterval)
    {
        guard scheduled else { return }

        if audioFile.length == audioFile.framePosition
        {
            guard numberOfLoops == -1 || loopingCount < numberOfLoops else { return }
            audioFile.framePosition = 0
            loopingCount += 1
        }

        let buffer = AVAudioPCMBuffer(pcmFormat: audioFile.processingFormat, frameCapacity: framePerBuffer)!
        let frameCount = min(framePerBuffer, AVAudioFrameCount(audioFile.length - audioFile.framePosition))
        print("\(audioFile.framePosition/48000) \(url.relativeString)")
        do
        {
            try audioFile.read(into: buffer, frameCount: frameCount)

            DispatchQueue.global().async(group: nil, qos: DispatchQoS.userInteractive, flags: .enforceQoS) { [weak self] in
                self?.player.scheduleBuffer(buffer, at: nil, options: .interruptsAtLoop)
                self?.player.prepare(withFrameCount: frameCount)
            }

            let nextCallTime = max(TimeInterval(Double(frameCount) / audioFile.fileFormat.sampleRate) - offset, 0)
            planNextPreloading(nextCallTime: nextCallTime)
        } catch
        {
            print("audio file read error : \(error)")
        }
    }

    private func planNextPreloading(nextCallTime: TimeInterval)
    {
        guard self.player.isPlaying else { return }

        let id = scheduledId
        lastBufferingDate = Date()

        DispatchQueue.global().asyncAfter(deadline: .now() + nextCallTime, qos: DispatchQoS.userInteractive) { [weak self] in
            guard let self = self else { return }
            guard id == self.scheduledId else { return }

            let delta = -(nextCallTime + self.lastBufferingDate.timeIntervalSinceNow)
            self.scheduleNextBuffer(offset: delta)
        }
    }
}
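For what it's worth, a hypothetical way to wire this wrapper into an engine (the helper function, the 5-second chunk size, and the connection to mainMixerNode are my own choices, not part of the answer):

func startStreaming(from soundtrackURL: URL) throws -> (AVAudioEngine, AVAudioStreamPCMPlayerWrapper) {
    let engine = AVAudioEngine()

    // Stream the m4a in 5-second chunks, loop forever, and preload 1 s ahead.
    let streamPlayer = try AVAudioStreamPCMPlayerWrapper(url: soundtrackURL,
                                                         bufferSize: 5,
                                                         isLooping: true,
                                                         repeatingTimes: -1,
                                                         preloadTime: 1)

    // The wrapper exposes its AVAudioPlayerNode, so attach and connect it like any other node.
    engine.attach(streamPlayer.player)
    engine.connect(streamPlayer.player,
                   to: engine.mainMixerNode,
                   format: streamPlayer.audioFile.processingFormat)
    try engine.start()

    streamPlayer.scheduleBuffer() // queue the first chunk
    streamPlayer.play()           // start playback; later chunks preload themselves

    return (engine, streamPlayer)
}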

Continuous listen the user voice and detect end of speech silence in SpeechKit framework

I am working on an application where we need to open a certain screen based on a voice command; for example, if the user says "Open Setting" it should open the settings screen. So far I have used the SpeechKit framework, but I am not able to detect the end-of-speech silence, like Siri does. I want to detect whether the user has ended their sentence/phrase.
Please find below the code where I have integrated the SpeechKit framework in two ways.
A) Via closure: recognitionTask(with request: SFSpeechRecognitionRequest, resultHandler: @escaping (SFSpeechRecognitionResult?, Error?) -> Swift.Void) -> SFSpeechRecognitionTask
let audioEngine = AVAudioEngine()
let speechRecognizer = SFSpeechRecognizer()
let request = SFSpeechAudioBufferRecognitionRequest()
var recognitionTask: SFSpeechRecognitionTask?

func startRecording() throws {
    let node = audioEngine.inputNode
    let recordingFormat = node.outputFormat(forBus: 0)
    node.installTap(onBus: 0, bufferSize: 1024,
                    format: recordingFormat) { [unowned self]
        (buffer, _) in
        self.request.append(buffer)
    }

    audioEngine.prepare()
    try audioEngine.start()

    weak var weakSelf = self
    recognitionTask = speechRecognizer?.recognitionTask(with: request) {
        (result, error) in
        if result != nil {
            if let transcription = result?.bestTranscription {
                weakSelf?.idenifyVoiceCommand(transcription)
            }
        }
    }
}
But when I say any word/sentence like "Open Setting", the recognitionTask(with:) closure is called multiple times, and since the idenifyVoiceCommand method sits inside that closure it is also called multiple times. How can I restrict it to being called only once?
I also reviewed the Timer logic I found while googling (SFSpeechRecognizer - detect end of utterance), but in my scenario it does not work, because I do not stop the audio engine; it continuously listens to the user's voice, like Siri does.
B) Via delegate: SFSpeechRecognitionTaskDelegate
speechRecognizer.recognitionTask(with: self.request, delegate: self)

func speechRecognitionTaskWasCancelled(_ task: SFSpeechRecognitionTask) {
}

func speechRecognitionTask(_ task: SFSpeechRecognitionTask, didFinishSuccessfully successfully: Bool) {
}
And I found that the delegate method that should handle the end of speech is not called reliably; it is only called occasionally, after some delay.
I had the same issue until now.
I checked your question and I suppose the code below helps you achieve the same thing I did:
recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest,
                                                    resultHandler: { (result, error) in
    var isFinal = false

    if result != nil {
        self.inputTextView.text = result?.bestTranscription.formattedString
        isFinal = (result?.isFinal)!
    }

    if let timer = self.detectionTimer, timer.isValid {
        if isFinal {
            self.inputTextView.text = ""
            self.textViewDidChange(self.inputTextView)
            self.detectionTimer?.invalidate()
        }
    } else {
        self.detectionTimer = Timer.scheduledTimer(withTimeInterval: 1.5, repeats: false, block: { (timer) in
            self.handleSend()
            isFinal = true
            timer.invalidate()
        })
    }
})
This checks whether input hasn't been received for 1.5 seconds.
To your speech recogniser class add:
private var timer: Timer?
And modify code here:
recognitionTask = speechRecognizer.recognitionTask(with: request) { (result, error) in
    self.timer?.invalidate()
    self.timer = Timer.scheduledTimer(withTimeInterval: 1.5, repeats: false) { _ in
        self.timer = nil
        // do here whatever you want to do when a pause of more than 1.5 sec is detected
    }
    if result != nil {
        // handle the partial transcription here
    }
}
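Putting the two answers together, a self-contained sketch of the timer-based end-of-silence detection (the class name, the 1.5-second threshold, and the onPhrase callback are illustrative choices, not code from the answers; microphone and speech-recognition permissions are assumed to be granted):

import AVFoundation
import Speech

final class SilenceDetectingRecognizer {
    private let audioEngine = AVAudioEngine()
    private let speechRecognizer = SFSpeechRecognizer()
    private var request: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private var silenceTimer: Timer?

    /// Called once per phrase, after 1.5 s without new speech.
    var onPhrase: ((String) -> Void)?

    func start() throws {
        let request = SFSpeechAudioBufferRecognitionRequest()
        self.request = request

        // Feed microphone buffers into the recognition request.
        let inputNode = audioEngine.inputNode
        let format = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: format) { buffer, _ in
            request.append(buffer)
        }
        audioEngine.prepare()
        try audioEngine.start()

        recognitionTask = speechRecognizer?.recognitionTask(with: request) { [weak self] result, _ in
            guard let self = self, let result = result else { return }
            let transcription = result.bestTranscription.formattedString

            // Every partial result restarts the silence timer; when no new result
            // arrives for 1.5 s, treat the current transcription as a finished phrase.
            DispatchQueue.main.async {
                self.silenceTimer?.invalidate()
                self.silenceTimer = Timer.scheduledTimer(withTimeInterval: 1.5, repeats: false) { _ in
                    self.onPhrase?(transcription)
                }
            }
        }
    }

    func stop() {
        silenceTimer?.invalidate()
        audioEngine.stop()
        audioEngine.inputNode.removeTap(onBus: 0)
        request?.endAudio()
        recognitionTask?.cancel()
    }
}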

Xcode 8 Swift 3 Pitch-altering sounds

I'm trying to make a simple game with a hit sound that has a different pitch whenever you hit something. I thought it'd be simple, but it ended up with a whole lot of stuff (most of which I completely copied from someone else):
func hitSound(value: Float) {
    let audioPlayerNode = AVAudioPlayerNode()

    audioPlayerNode.stop()
    engine.stop() // This is an AVAudioEngine defined previously
    engine.reset()

    engine.attach(audioPlayerNode)

    let changeAudioUnitTime = AVAudioUnitTimePitch()
    changeAudioUnitTime.pitch = value

    engine.attach(changeAudioUnitTime)
    engine.connect(audioPlayerNode, to: changeAudioUnitTime, format: nil)
    engine.connect(changeAudioUnitTime, to: engine.outputNode, format: nil)
    audioPlayerNode.scheduleFile(file, at: nil, completionHandler: nil) // File is an AVAudioFile defined previously

    try? engine.start()
    audioPlayerNode.play()
}
Since this code seems to stop playing any sounds currently being played in order to play the new sound, is there a way I can alter this behaviour so it doesn't stop playing anything? I tried removing the engine.stop and engine.reset bits, but this just crashes the app. Also, this code is incredibly slow when called frequently. Is there something I could do to speed it up? This hit sound is needed very frequently.
You're resetting the engine every time you play a sound! And you're creating extra player nodes - it's actually much simpler than that if you only want one instance of the pitch shifted sound playing at once:
// instance variables
let engine = AVAudioEngine()
let audioPlayerNode = AVAudioPlayerNode()
let changeAudioUnitTime = AVAudioUnitTimePitch()
call setupAudioEngine() once:
func setupAudioEngine() {
    engine.attach(self.audioPlayerNode)
    engine.attach(changeAudioUnitTime)
    engine.connect(audioPlayerNode, to: changeAudioUnitTime, format: nil)
    engine.connect(changeAudioUnitTime, to: engine.outputNode, format: nil)
    try? engine.start()
    audioPlayerNode.play()
}
and call hitSound() as many times as you like:
func hitSound(value: Float) {
    changeAudioUnitTime.pitch = value
    audioPlayerNode.scheduleFile(file, at: nil, completionHandler: nil) // File is an AVAudioFile defined previously
}
p.s. pitch can be shifted two octaves up or down, for a range of 4 octaves, and lies in the numerical range of [-2400, 2400], having the unit "cents".
p.p.s AVAudioUnitTimePitch is very cool technology. We definitely didn't have anything like it when I was a kid.
UPDATE
If you want multi channel, you can easily set up multiple player and pitch nodes, however you must choose the number of channels before you start the engine. Here's how you'd do two (it's easy to extend to n instances, and you'll probably want to choose your own method of choosing which channel to interrupt when all are playing):
// instance variables
let engine = AVAudioEngine()
var nextPlayerIndex = 0
let audioPlayers = [AVAudioPlayerNode(), AVAudioPlayerNode()]
let pitchUnits = [AVAudioUnitTimePitch(), AVAudioUnitTimePitch()]
func setupAudioEngine() {
    var i = 0
    for playerNode in audioPlayers {
        let pitchUnit = pitchUnits[i]
        engine.attach(playerNode)
        engine.attach(pitchUnit)
        engine.connect(playerNode, to: pitchUnit, format: nil)
        engine.connect(pitchUnit, to: engine.mainMixerNode, format: nil)
        i += 1
    }
    try? engine.start()
    for playerNode in audioPlayers {
        playerNode.play()
    }
}
func hitSound(value: Float) {
    let playerNode = audioPlayers[nextPlayerIndex]
    let pitchUnit = pitchUnits[nextPlayerIndex]
    pitchUnit.pitch = value

    // interrupt playing sound if you have to
    if playerNode.isPlaying {
        playerNode.stop()
        playerNode.play()
    }

    playerNode.scheduleFile(file, at: nil, completionHandler: nil) // File is an AVAudioFile defined previously

    nextPlayerIndex = (nextPlayerIndex + 1) % audioPlayers.count
}