AVAudioEngine MIDI file playback (current progress + MIDI end callback) in Swift

I am playing a MIDI file using AVAudioEngine, AVAudioSequencer, and AVAudioUnitSampler.
The AVAudioUnitSampler loads a SoundFont and the AVAudioSequencer loads the MIDI file.
My initial configuration is:
engine = AVAudioEngine()
sampler = AVAudioUnitSampler()
speedControl = AVAudioUnitVarispeed()
pitchControl = AVAudioUnitTimePitch()
engine.attach(sampler)
engine.attach(pitchControl)
engine.attach(speedControl)
engine.connect(sampler, to: speedControl, format: nil)
engine.connect(speedControl, to: pitchControl, format: nil)
engine.connect(pitchControl, to: engine.mainMixerNode, format: nil)
Here is how my sequencer loads the MIDI file:
func setupSequencer() {
    self.sequencer = AVAudioSequencer(audioEngine: self.engine)
    let options = AVMusicSequenceLoadOptions()
    let documentsDirectoryURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!
    let introurl = URL(string: songDetails!.intro!)
    let midiFileURL = documentsDirectoryURL.appendingPathComponent(introurl!.lastPathComponent)
    do {
        try sequencer.load(from: midiFileURL, options: options)
        print("loaded \(midiFileURL)")
    } catch {
        print("something screwed up \(error)")
        return
    }
    sequencer.prepareToPlay()
    if sequencer.isPlaying == false {
    }
}
And here is how the sampler loads the SoundFont:
func loadSF2PresetIntoSampler(_ preset: UInt8, bankURL: URL) {
    do {
        try self.sampler.loadSoundBankInstrument(at: bankURL,
                                                 program: preset,
                                                 bankMSB: UInt8(kAUSampler_DefaultMelodicBankMSB),
                                                 bankLSB: UInt8(kAUSampler_DefaultBankLSB))
    } catch {
        print("error loading sound bank instrument")
    }
}
And it plays fine, no issues with this part. I have two other requirements, and I am having problems with both:
1. I have to play another MIDI file after the first one finishes. For that I need a completion/finished callback for the MIDI file from either the engine or the sequencer, OR a way to load multiple MIDI files into the sequencer. I have tried many approaches, but nothing has helped.
2. I need to show the playback progress of the MIDI file, i.e. current time and total time. For this I tried a method found somewhere in a Stack Overflow answer, which is:
var currentPositionInSeconds: TimeInterval {
    get {
        guard let offsetTime = offsetTime else { return 0 }
        guard let lastRenderTime = engine.outputNode.lastRenderTime else { return 0 }
        let frames = lastRenderTime.sampleTime - offsetTime.sampleTime
        return Double(frames) / offsetTime.sampleRate
    }
}
Here offsetTime is
offsetTime = engine.outputNode.lastRenderTime
and it always returns nil.

Glad to see someone using my example code.
It's a missing feature. Please file a Radar. Nothing will happen without a Radar.
They do pay attention to them when scheduling what to work on.
I fake it by getting the length of the sequence.
if let ft = sequencer.tracks.first {
    self.lengthInSeconds = ft.lengthInSeconds
} else {
    self.lengthInSeconds = 0
}
Then in my play function,
do {
    try sequencer.start()
    Timer.scheduledTimer(withTimeInterval: self.lengthInSeconds, repeats: false) {
        [weak self] (t: Timer) in
        guard let self = self else { return }
        t.invalidate()
        self.logger.debug("sequencer finished")
        self.sequencer.stop()
    }
    ...
You can use a DispatchSourceTimer if you want more accuracy.
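As a rough illustration of that idea (my own sketch, assuming the `sequencer` and `lengthInSeconds` from the snippets above), you can poll AVAudioSequencer's currentPositionInSeconds on a DispatchSourceTimer, which also gives you the progress values the question asks for:

private var progressTimer: DispatchSourceTimer?

func startProgressTimer() {
    let timer = DispatchSource.makeTimerSource(queue: .main)
    timer.schedule(deadline: .now(), repeating: .milliseconds(100))
    timer.setEventHandler { [weak self] in
        guard let self = self else { return }
        let current = self.sequencer.currentPositionInSeconds
        // Update the progress UI here with current / self.lengthInSeconds.
        if current >= self.lengthInSeconds {
            self.progressTimer?.cancel()
            self.progressTimer = nil
            self.sequencer.stop()
            // This is also the place to load and start the next MIDI file.
        }
    }
    progressTimer = timer
    timer.resume()
}

Call startProgressTimer() right after sequencer.start(). This is only a sketch; it does not account for rate changes on the sequencer or the varispeed node.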

Is there a more detailed way to debug SFSpeechRecognizer?

Updated info below, and new code
I am trying to incorporate SFSpeechRecognizer into my app, and the errors/results I am getting from three pre-recorded audio files aren't enough for me to figure out what's going on; information via Google is sparse.
The code where I loop through the three files is at the bottom. Here are the responses I get for them. I've made sure to speak loudly and clearly in each file, yet I still get "No speech detected" or no text returned.
SS-X-03.m4a : There was an error: Optional(Error
Domain=kAFAssistantErrorDomain Code=1110 "No speech detected"
UserInfo={NSLocalizedDescription=No speech detected})
SS-X-20221125000.m4a : There was an error: Optional(Error
Domain=kAFAssistantErrorDomain Code=1110 "No speech detected"
UserInfo={NSLocalizedDescription=No speech detected})
SS-X-20221125001.m4a : (there is some text here if I set
request.requiresOnDeviceRecognition to false)
My code:
func findAudioFiles() {
    let fm = FileManager.default
    var aFiles: URL
    print("\(urlPath)")
    do {
        let items = try fm.contentsOfDirectory(atPath: documentsPath)
        let filteredInterestArray1 = items.filter({ $0.hasSuffix(".m4a") })
        let filteredInterestArray2 = filteredInterestArray1.filter({ $0.contains("SS-X-") })
        let sortedItems = filteredInterestArray2.sorted()
        for item in sortedItems {
            audioFiles.append(item)
        }
        NotificationCenter.default.post(name: Notification.Name("goAndRead"), object: nil, userInfo: myDic)
    } catch {
        print("\(error)")
    }
}
@objc func goAndRead() {
    audioIndex += 1
    if audioIndex != audioFiles.count {
        let fileURL = NSURL.fileURL(withPath: documentsPath + "/" + audioFiles[audioIndex], isDirectory: false)
        transcribeAudio(url: fileURL, item: audioFiles[audioIndex])
    }
}
func requestTranscribePermissions() {
    SFSpeechRecognizer.requestAuthorization { [unowned self] authStatus in
        DispatchQueue.main.async {
            if authStatus == .authorized {
                print("Good to go!")
            } else {
                print("Transcription permission was declined.")
            }
        }
    }
}
func transcribeAudio(url: URL, item: String) {
    guard let recognizer = SFSpeechRecognizer(locale: Locale(identifier: "en-US")) else { return }
    let request = SFSpeechURLRecognitionRequest(url: url)
    if !recognizer.supportsOnDeviceRecognition { print("offline not available"); return }
    if !recognizer.isAvailable { print("not available"); return }
    request.requiresOnDeviceRecognition = true
    request.shouldReportPartialResults = true
    recognizer.recognitionTask(with: request) { (result, error) in
        guard let result = result else {
            print("\(item) : There was an error: \(error.debugDescription)")
            return
        }
        if result.isFinal {
            print("\(item) : \(result.bestTranscription.formattedString)")
            NotificationCenter.default.post(name: Notification.Name("goAndRead"), object: nil, userInfo: self.myDic)
        }
    }
}
Updated info
It appears that I was calling SFSpeechURLRecognitionRequest too often, and before the first request had completed. Perhaps I need to create a new instance of SFSpeechRecognizer each time? I'm not sure.
Regardless, I quickly (and sloppily) adjusted the code so that it only runs once the previous request has returned its results.
The results were much better, except that one audio file still came up with no results. Not an error, just no text.
That file is the same as the previous one, in that I took a single audio recording and split it in two, so the formats and volumes are the same.
So I still need a better way to debug this, to find out what is going wrong with that file.
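One quick sanity check (my own suggestion, not from the thread): open each file with AVAudioFile and print its duration and format before handing it to the recognizer, so you can confirm the "silent" file really contains audio of the expected length:

import AVFoundation

func inspectAudioFile(at url: URL) {
    do {
        let file = try AVAudioFile(forReading: url)
        let seconds = Double(file.length) / file.fileFormat.sampleRate
        print("\(url.lastPathComponent): \(String(format: "%.2f", seconds)) s, \(file.fileFormat)")
    } catch {
        print("\(url.lastPathComponent): could not open - \(error)")
    }
}

If the duration and format look sane, the difference is more likely on the recognition side (for example, how the on-device model handles the two halves of the split recording).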

Recording speech synthesis to a saved file

Below is the code I've put together to attempt to take a phrase, save it to a file, and then play that saved file. I'm not sure which part isn't working (incorrect file name, not saving the file, or not finding the file). Any help would be appreciated. (speakPhrase is just a helper function to confirm that the speech synthesizer actually works, which it does.)
import AVFoundation
import Foundation

class Coordinator {
    let synthesizer: AVSpeechSynthesizer
    var player: AVAudioPlayer?

    init() {
        let synthesizer = AVSpeechSynthesizer()
        self.synthesizer = synthesizer
    }

    var recordingPath: URL {
        let soundName = "Finally.caf"
        // I've tried numerous file extensions. .caf was in an answer somewhere else.
        // I would think it would be .pcm, but that doesn't work either.
        // Local Directory
        let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
        return paths[0].appendingPathComponent(soundName)
    }

    func speakPhrase(phrase: String) {
        let utterance = AVSpeechUtterance(string: phrase)
        utterance.voice = AVSpeechSynthesisVoice(language: "en")
        synthesizer.speak(utterance)
    }

    func playFile() {
        print("Trying to play the file")
        do {
            try AVAudioSession.sharedInstance().setCategory(.playback, mode: .default)
            try AVAudioSession.sharedInstance().setActive(true)
            player = try AVAudioPlayer(contentsOf: recordingPath, fileTypeHint: AVFileType.caf.rawValue)
            guard let player = player else { return }
            player.play()
        } catch {
            print("Error playing file.")
        }
    }

    func saveAVSpeechUtteranceToFile() {
        let utterance = AVSpeechUtterance(string: "This is speech to record")
        utterance.voice = AVSpeechSynthesisVoice(language: "en-US")
        utterance.rate = 0.50
        synthesizer.write(utterance) { [self] (buffer: AVAudioBuffer) in
            guard let pcmBuffer = buffer as? AVAudioPCMBuffer else {
                fatalError("unknown buffer type: \(buffer)")
            }
            if pcmBuffer.frameLength == 0 {
                // Done
            } else {
                // append buffer to file
                do {
                    let audioFile = try AVAudioFile(forWriting: recordingPath, settings: pcmBuffer.format.settings, commonFormat: .pcmFormatInt16, interleaved: false)
                    try audioFile.write(from: pcmBuffer)
                } catch {
                    print(error.localizedDescription)
                }
            }
        }
    }
}
Did you notice that the bufferCallback in the function below is called multiple times?
func write(_ utterance: AVSpeechUtterance, toBufferCallback bufferCallback: @escaping AVSpeechSynthesizer.BufferCallback)
So the root cause is pretty simple: the AVSpeechUtterance's audio is delivered in multiple parts. On my iPhone, the callback is called about 20 times.
So if you create a new audio file in the closure every time, you only keep the last tiny chunk (on my iPhone it was a 6 KB audio file), which is essentially inaudible when you play it.
So replace the function with:
func saveAVSpeechUtteranceToFile() {
    let utterance = AVSpeechUtterance(string: "This is speech to record")
    utterance.voice = AVSpeechSynthesisVoice(language: "en-US")
    utterance.rate = 0.50
    // Only create a new file handle if `output` is nil.
    var output: AVAudioFile?
    synthesizer.write(utterance) { [self] (buffer: AVAudioBuffer) in
        guard let pcmBuffer = buffer as? AVAudioPCMBuffer else {
            fatalError("unknown buffer type: \(buffer)")
        }
        if pcmBuffer.frameLength == 0 {
            // Done
        } else {
            do {
                // This closure is called multiple times, so to save the complete audio,
                // create the file only once and keep appending to it.
                if output == nil {
                    output = try AVAudioFile(
                        forWriting: recordingPath,
                        settings: pcmBuffer.format.settings,
                        commonFormat: .pcmFormatInt16,
                        interleaved: false)
                }
                try output?.write(from: pcmBuffer)
            } catch {
                print(error.localizedDescription)
            }
        }
    }
}
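One small follow-up (my addition, not part of the original answer): the zero-frameLength buffer is the "done" signal, so that branch is a natural place to close the file and start playback of the saved recording, for example:

if pcmBuffer.frameLength == 0 {
    // Writing finished: drop the file reference so it is flushed and closed,
    // then play the saved recording.
    output = nil
    DispatchQueue.main.async { self.playFile() }
}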
BTW, I uploaded a GitHub demo here.
Finally, here is how to inspect the file contents on an iOS device:
open Xcode's Window menu -> Devices and Simulators, then select your app and download its container to copy out your app's content.

Change Sample rate with AudioConverter

I am trying to re-sample the input audio from 44.1 kHz to 48 kHz:
1. capturing input using AudioToolbox's AUAudioUnit.inputHandler
2. writing the raw 44.1 kHz input to a WAV file (this is working perfectly)
3. converting the 44.1 kHz audio to 48 kHz with AudioConverterFillComplexBuffer (https://developer.apple.com/documentation/audiotoolbox/1503098-audioconverterfillcomplexbuffer) and writing the converted bytes to a second file
The problem is in the third step: after writing it out to a file, the audio is very noisy.
Here is my code:
// convert to 48kHz
var audioConverterRef: AudioConverterRef?
CheckError(AudioConverterNew(&self.hardwareFormat,
&self.convertingFormat,
&audioConverterRef), "AudioConverterNew failed")
let outputBufferSize = inNumBytes
let outputBuffer = UnsafeMutablePointer<Int16>.allocate(capacity: MemoryLayout<Int16>.size * Int(outputBufferSize))
let convertedData = AudioBufferList.allocate(maximumBuffers: 1)
convertedData[0].mNumberChannels = self.hardwareFormat.mChannelsPerFrame
convertedData[0].mDataByteSize = outputBufferSize
convertedData[0].mData = UnsafeMutableRawPointer(outputBuffer)
var ioOutputDataPackets = UInt32(inNumPackets)
CheckError(AudioConverterFillComplexBuffer(audioConverterRef!,
self.coverterCallback,
&bufferList,
&ioOutputDataPackets,
convertedData.unsafeMutablePointer,
nil), "AudioConverterFillComplexBuffer error")
let convertedmData = convertedData[0].mData!
let convertedmDataByteSize = convertedData[0].mDataByteSize
// Write converted packets to file -> audio_unit_int16_48.wav
CheckError(AudioFileWritePackets(self.outputFile48000!,
false,
convertedmDataByteSize,
nil,
recordPacket,
&ioOutputDataPackets,
convertedmData), "AudioFileWritePackets error")
and the conversion callback body is here:
let buffers = UnsafeMutableBufferPointer<AudioBuffer>(start: &bufferList.mBuffers, count: Int(bufferList.mNumberBuffers))
let dataPtr = UnsafeMutableAudioBufferListPointer(ioData)
dataPtr[0].mNumberChannels = 1
dataPtr[0].mData = buffers[0].mData
dataPtr[0].mDataByteSize = buffers[0].mDataByteSize
ioDataPacketCount.pointee = buffers[0].mDataByteSize / UInt32(MemoryLayout<Int16>.size)
the sample project is here: https://drive.google.com/file/d/1GvCJ5hEqf7PsBANwUpVTRE1L7S_zQxnL/view?usp=sharing
If part of your chain is still AVAudioEngine, there's sample code from Apple for offline processing of AVAudioFiles.
Here's a modified version that includes the sampleRate change:
import Cocoa
import AVFoundation
import PlaygroundSupport

let outputSampleRate = 48_000.0
let outputAudioFormat = AVAudioFormat(standardFormatWithSampleRate: outputSampleRate, channels: 2)!

// file needs to be in ~/Documents/Shared Playground Data
let localURL = playgroundSharedDataDirectory.appendingPathComponent("inputFile_44.aiff")
let outputURL = playgroundSharedDataDirectory.appendingPathComponent("outputFile_48.aiff")

let sourceFile: AVAudioFile
let format: AVAudioFormat
do {
    sourceFile = try AVAudioFile(forReading: localURL)
    format = sourceFile.processingFormat
} catch {
    fatalError("Unable to load the source audio file: \(error.localizedDescription).")
}

let sourceSettings = sourceFile.fileFormat.settings
var outputSettings = sourceSettings
outputSettings[AVSampleRateKey] = outputSampleRate

let engine = AVAudioEngine()
let player = AVAudioPlayerNode()
engine.attach(player)

// Connect the nodes.
engine.connect(player, to: engine.mainMixerNode, format: format)

// Schedule the source file.
player.scheduleFile(sourceFile, at: nil)

do {
    // The maximum number of frames the engine renders in any single render call.
    let maxFrames: AVAudioFrameCount = 4096
    try engine.enableManualRenderingMode(.offline, format: outputAudioFormat,
                                         maximumFrameCount: maxFrames)
} catch {
    fatalError("Enabling manual rendering mode failed: \(error).")
}

do {
    try engine.start()
    player.play()
} catch {
    fatalError("Unable to start audio engine: \(error).")
}

let buffer = AVAudioPCMBuffer(pcmFormat: engine.manualRenderingFormat, frameCapacity: engine.manualRenderingMaximumFrameCount)!

var outputFile: AVAudioFile?
do {
    outputFile = try AVAudioFile(forWriting: outputURL, settings: outputSettings)
} catch {
    fatalError("Unable to open output audio file: \(error).")
}

let outputLengthD = Double(sourceFile.length) * outputSampleRate / sourceFile.fileFormat.sampleRate
let outputLength = Int64(ceil(outputLengthD)) // no sample left behind

while engine.manualRenderingSampleTime < outputLength {
    do {
        let frameCount = outputLength - engine.manualRenderingSampleTime
        let framesToRender = min(AVAudioFrameCount(frameCount), buffer.frameCapacity)
        let status = try engine.renderOffline(framesToRender, to: buffer)
        switch status {
        case .success:
            // The data rendered successfully. Write it to the output file.
            try outputFile?.write(from: buffer)
        case .insufficientDataFromInputNode:
            // Applicable only when using the input node as one of the sources.
            break
        case .cannotDoInCurrentContext:
            // The engine couldn't render in the current render call.
            // Retry in the next iteration.
            break
        case .error:
            // An error occurred while rendering the audio.
            fatalError("The manual rendering failed.")
        }
    } catch {
        fatalError("The manual rendering failed: \(error).")
    }
}

// Stop the player node and engine.
player.stop()
engine.stop()

outputFile = nil // AVAudioFile won't close until it goes out of scope, so we set the output file back to nil here
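If you would rather convert buffers directly without setting up an engine, here is a minimal sketch (my own, not part of the original answer) using the higher-level AVAudioConverter, which handles the packet bookkeeping that AudioConverterFillComplexBuffer leaves to you. The output format here (16-bit interleaved PCM) is an assumption you would adapt to your pipeline:

import AVFoundation

func resample(_ buffer: AVAudioPCMBuffer, to sampleRate: Double) -> AVAudioPCMBuffer? {
    let inputFormat = buffer.format
    guard let outputFormat = AVAudioFormat(commonFormat: .pcmFormatInt16,
                                           sampleRate: sampleRate,
                                           channels: inputFormat.channelCount,
                                           interleaved: true),
          let converter = AVAudioConverter(from: inputFormat, to: outputFormat) else {
        return nil
    }
    let ratio = sampleRate / inputFormat.sampleRate
    let capacity = AVAudioFrameCount(Double(buffer.frameLength) * ratio) + 1
    guard let output = AVAudioPCMBuffer(pcmFormat: outputFormat, frameCapacity: capacity) else {
        return nil
    }
    var fed = false
    var conversionError: NSError?
    let status = converter.convert(to: output, error: &conversionError) { _, inputStatus in
        if fed {
            // One-shot conversion: no more input after the single buffer.
            inputStatus.pointee = .endOfStream
            return nil
        }
        fed = true
        inputStatus.pointee = .haveData
        return buffer
    }
    return (status == .error || conversionError != nil) ? nil : output
}

Each converted buffer can then be appended to an AVAudioFile opened for writing at 48 kHz, which tends to be less error-prone than writing raw packets with AudioFileWritePackets. For a continuous input stream you would keep one converter alive and feed it successive buffers (returning .noDataNow when you have nothing yet) instead of ending the stream on every call.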

Playing a sound on macOS

I'd like to play a sound in my app. I found a lot of examples and could also compile and run them with Swift 3, but they are always for iOS. When implementing this code in my app, AVAudioSession stays undefined. What do I have to do? Is there something different for macOS?
import AVFoundation
...
var audioPlayer = AVAudioPlayer()

func PlaySound() {
    let alertSound = URL(fileURLWithPath: Bundle.main.path(forResource: "Sound", ofType: "mp3")!)
    do {
        try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayback)
    } catch _ {
    }
    do {
        try AVAudioSession.sharedInstance().setActive(true)
    } catch _ {
    }
    do {
        audioPlayer = try AVAudioPlayer(contentsOf: alertSound)
    } catch _ {
    }
    audioPlayer.prepareToPlay()
    audioPlayer.play()
}
Swift 3 / 4 / 4.2 / 5 - Easiest solution:
NSSound(named: "customSound")?.play()
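If the sound isn't a named resource in the bundle, a file URL also works (a small addition to the answer above; "alert.wav" is just a placeholder name):

import AppKit

let url = Bundle.main.url(forResource: "alert", withExtension: "wav")!
NSSound(contentsOf: url, byReference: true)?.play()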
I've used the following function in conjunction with Swift 3 and a Cocoa project.
import AVFoundation
func playSound(file: String, ext: String) -> Void {
    let url = Bundle.main.url(forResource: file, withExtension: ext)!
    do {
        // Note: `player` is a local constant; if playback cuts off early, keep a
        // strong reference (e.g. store it in a property) so it isn't deallocated
        // before the sound finishes.
        let player = try AVAudioPlayer(contentsOf: url)
        player.prepareToPlay()
        player.play()
    } catch let error {
        print(error.localizedDescription)
    }
}
// usage //
playSound(file: "sound", ext: "caf") // where the file name is sound.caf.
Make sure that the target membership checkbox is on when you select an audio file in your Xcode project.
A modified version of El Tomato's answer:
here's the Swift 4.2 class that I use quite often for sound.
Usage:
/// EXAMPLE 1
//calling this will create a "bop.m4a" player in memory, and, play it immediately
MakeSound.shared.playSound("bop.m4a")
//calling it a second time has a lot less overhead because the player is already in memory
MakeSound.shared.playSound("bop.m4a")
///EXAMPLE 2
//simply calls the above many times
MakeSound.shared.playSound(["bop.m4a", "beep.m4a", "bong.m4a"])
// EXAMPLE 3
//registers a shorthand, for quick calling
MakeSound.shared.registerSound(fileName: "bop", fileExtension: "m4a", shortHand: "b")
//plays using the shorthand, mostly handy because you define the actual file once,
//and can change the one line of code without having to change every reference
MakeSound.shared.playSound("b")
and the class:
class MakeSound {

    private var players = [String: AVAudioPlayer]()
    static var shared = MakeSound()

    func playSound(fileName: String, fileExtension: String) {
        if registerSound(fileName: fileName, fileExtension: fileExtension, shortHand: fileName + "." + fileExtension) {
            playSound(fileName + "." + fileExtension)
        }
    }

    // Thought about naming this "playsoundS", but I like it with the same name; makes it easier to type.
    func playSound(_ shortHands: [String]) {
        for sh in shortHands {
            playSound(sh)
        }
    }

    /// playSound("shorthand") OR playSound("mySound.mp3")
    func playSound(_ shortHand: String) {
        if let player = players[shortHand] {
            player.prepareToPlay()
            player.play()
        } else {
            if shortHand.contains(".") {
                // Going to assume that the coder will not send "." as a file path,
                // and also will not send "xyz." as a file path.
                var comp = shortHand.components(separatedBy: ".")
                let ext = comp.last!
                comp.removeLast()
                if registerSound(fileName: comp.joined(separator: "."), fileExtension: ext, shortHand: shortHand) {
                    playSound(shortHand)
                }
            } else {
                print("Sound request sent to MakeSound: no such shorthand, and not a valid filename either.")
            }
        }
    }

    /// Registers a sound with MakeSound, and returns a Bool based on the success.
    @discardableResult func registerSound(fileName: String, fileExtension: String, shortHand: String) -> Bool {
        guard let url = Bundle.main.url(forResource: fileName, withExtension: fileExtension) else {
            print("Unable to register sound: \(shortHand). Filepath not valid.")
            return false
        }
        do {
            let player = try AVAudioPlayer(contentsOf: url)
            players[shortHand] = player
        } catch let error {
            print("Audioplayer \"\(shortHand)\" unable to initialize:\n" + error.localizedDescription)
            return false
        }
        return true
    }
}
Now, I will admit that NSSound is by far the easiest and most straightforward option, yet some of us prefer AVFoundation because we get a bit more control. (And such control is, ironically, removed in my class.)
Even though it is a little old-fashioned, you can also simply enter the file name in the Attributes Inspector. All you need to do first is add a sound file to your project.

Swift - Press Button (completion handler issue)

I want a user to press a button, have it change background color (to yellow), play a WAV, and, on completion of the WAV, have the button revert to its original color (red). So I have a completion handler around the sound. I have tried various combinations of the code below, but the WAV plays and the button doesn't appear to change color.
Is this the wrong approach, or am I doing something wrong? I don't want to have to put completion handlers around the color changes, as that, I presume, is overkill.
Many thanks.
typealias CompletionHandler = (success: Bool) -> Void

@IBAction func fireButton(sender: AnyObject) {
    playLaser({ (success) -> Void in
        if success {
            self.shots -= 1
            self.labelShotsLeft.text = String(self.shots)
        } else {
        }
    })
}

func playLaser(completionHandler: CompletionHandler) {
    fireButton.layer.backgroundColor = UIColor.yellowColor().CGColor
    let url = NSBundle.mainBundle().URLForResource("laser", withExtension: "wav")!
    do {
        player = try AVAudioPlayer(contentsOfURL: url)
        guard let player = player else { return }
        player.prepareToPlay()
        player.play()
    } catch let error as NSError {
        print(error.description)
    }
    self.fireButton.layer.backgroundColor = UIColor.redColor().CGColor
    completionHandler(success: true)
}
To detect when AVAudioPlayer has finished playing, you need to use AVAudioPlayerDelegate.
You may need to write something like this:
func playLaser(completionHandler: CompletionHandler) {
    fireButton.layer.backgroundColor = UIColor.yellowColor().CGColor
    let url = NSBundle.mainBundle().URLForResource("laser", withExtension: "wav")!
    do {
        player = try AVAudioPlayer(contentsOfURL: url)
        guard let player = player else { return }
        player.delegate = self //<- Sorry, this was missing in my first post
        player.play()
    } catch let error as NSError {
        print(error.description)
    }
    audioPlayerCompletionHandler = completionHandler
}

var audioPlayerCompletionHandler: CompletionHandler?

func audioPlayerDidFinishPlaying(player: AVAudioPlayer, successfully flag: Bool) {
    self.fireButton.layer.backgroundColor = UIColor.redColor().CGColor
    audioPlayerCompletionHandler?(success: true)
}
(You need to add conformance to AVAudioPlayerDelegate to your ViewController's declaration header.)
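For example, assuming your controller is called ViewController:

class ViewController: UIViewController, AVAudioPlayerDelegate {
    // ...
}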
Code does not magically pause and wait just because you call player.play(); that would be horrible! Thus, your so-called completion handler is not a completion handler at all. It runs immediately, that is, as soon as you start playing. Your code does nothing to find out when the audio player has finished playing.
For that, you need to configure a delegate and receive the delegate message the audio player emits when it finishes playing.
This is one of those questions that is a little more subtle than meets the eye. I tried putting three completion handlers around the three tasks: change colour to yellow, play the sound, change colour back to red. The code executed in the correct sequence (as I NSLogged it), but the button never changed colour because the screen doesn't update the controls until the main thread becomes free. Here is the code that works, which I hope other readers might find useful:
Swift 2.0
@IBAction func fireButton(sender: AnyObject) {
    dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)) {
        dispatch_sync(dispatch_get_main_queue()) {
            self.fireButton.layer.backgroundColor = UIColor.yellowColor().CGColor
        }
        self.playLaser({ (success) -> Void in
            if success {
                self.shots -= 1
            } else {
            }
        })
        dispatch_sync(dispatch_get_main_queue()) {
            self.labelShotsLeft.text = String(self.shots)
            self.fireButton.layer.backgroundColor = UIColor.redColor().CGColor
        }
    }
}

func playLaser(completion: (success: Bool) -> ()) {
    let url = NSBundle.mainBundle().URLForResource("laser", withExtension: "wav")!
    do {
        player = try AVAudioPlayer(contentsOfURL: url)
        guard let player = player else { return }
        player.play()
        completion(success: true)
    } catch let error as NSError {
        completion(success: false)
    }
}
Swift 3.0
@IBAction func fireButton(_ sender: AnyObject) {
    let fireQueue = DispatchQueue(label: "queueFirebutton")
    fireQueue.async {
        DispatchQueue.main.sync {
            self.fireButtonDisabled()
        }
        DispatchQueue.main.sync {
            self.playLaser()
            self.shots -= 1
            if self.shots <= 0 {
                self.shots = 0
            }
        }
        DispatchQueue.main.sync {
            if self.shots < 0 { self.shots = 0 }
            self.labelShotsLeft.text = String(self.shots)
            sleep(1) // note: this blocks the main thread for a second before re-enabling the button
            self.fireButtonEnabled()
        }
    }
}

func playLaser() {
    let url = Bundle.main.url(forResource: "laser", withExtension: "wav")!
    do {
        player = try AVAudioPlayer(contentsOf: url)
        guard let player = player else { return }
        player.play()
    } catch {
    }
}