Below is the code I've put together to take a phrase, save it to a file, and then play that saved file. I'm not sure which part is failing (an incorrect file name, not saving the file, or not finding the file). Any help would be appreciated. (speakPhrase is just a helper function to confirm that the speech synthesizer actually works, which it does.)
import AVFoundation
import Foundation

class Coordinator {
    let synthesizer: AVSpeechSynthesizer
    var player: AVAudioPlayer?

    init() {
        let synthesizer = AVSpeechSynthesizer()
        self.synthesizer = synthesizer
    }

    var recordingPath: URL {
        let soundName = "Finally.caf"
        // I've tried numerous file extensions. .caf was in an answer somewhere else.
        // I would think it would be .pcm, but that doesn't work either.
        // Local Directory
        let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
        return paths[0].appendingPathComponent(soundName)
    }

    func speakPhrase(phrase: String) {
        let utterance = AVSpeechUtterance(string: phrase)
        utterance.voice = AVSpeechSynthesisVoice(language: "en")
        synthesizer.speak(utterance)
    }

    func playFile() {
        print("Trying to play the file")
        do {
            try AVAudioSession.sharedInstance().setCategory(.playback, mode: .default)
            try AVAudioSession.sharedInstance().setActive(true)
            player = try AVAudioPlayer(contentsOf: recordingPath, fileTypeHint: AVFileType.caf.rawValue)
            guard let player = player else { return }
            player.play()
        } catch {
            print("Error playing file.")
        }
    }

    func saveAVSpeechUtteranceToFile() {
        let utterance = AVSpeechUtterance(string: "This is speech to record")
        utterance.voice = AVSpeechSynthesisVoice(language: "en-US")
        utterance.rate = 0.50

        synthesizer.write(utterance) { [self] (buffer: AVAudioBuffer) in
            guard let pcmBuffer = buffer as? AVAudioPCMBuffer else {
                fatalError("unknown buffer type: \(buffer)")
            }
            if pcmBuffer.frameLength == 0 {
                // Done
            } else {
                // append buffer to file
                do {
                    let audioFile = try AVAudioFile(forWriting: recordingPath, settings: pcmBuffer.format.settings, commonFormat: .pcmFormatInt16, interleaved: false)
                    try audioFile.write(from: pcmBuffer)
                } catch {
                    print(error.localizedDescription)
                }
            }
        }
    }
}
Did you notice that the bufferCallback in the function below gets called multiple times?
func write(_ utterance: AVSpeechUtterance, toBufferCallback bufferCallback: @escaping AVSpeechSynthesizer.BufferCallback)
So the root cause is pretty simple: the AVSpeechUtterance's audio is delivered in multiple parts. On my iPhone, the callback was called about 20 times.
If you create a new audio file in the closure every time, you end up with a very tiny audio file (about 6 KB on my iPhone) containing only the last chunk, which is barely audible if you play it.
So replace the function with:
func saveAVSpeechUtteranceToFile() {
    let utterance = AVSpeechUtterance(string: "This is speech to record")
    utterance.voice = AVSpeechSynthesisVoice(language: "en-US")
    utterance.rate = 0.50

    // Only create a new file handle if `output` is nil.
    var output: AVAudioFile?

    synthesizer.write(utterance) { [self] (buffer: AVAudioBuffer) in
        guard let pcmBuffer = buffer as? AVAudioPCMBuffer else {
            fatalError("unknown buffer type: \(buffer)")
        }
        if pcmBuffer.frameLength == 0 {
            // Done
        } else {
            do {
                // This closure is called multiple times, so to save the complete audio,
                // create the file only once and keep appending to it.
                if output == nil {
                    output = try AVAudioFile(
                        forWriting: recordingPath,
                        settings: pcmBuffer.format.settings,
                        commonFormat: .pcmFormatInt16,
                        interleaved: false)
                }
                try output?.write(from: pcmBuffer)
            } catch {
                print(error.localizedDescription)
            }
        }
    }
}
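For completeness, a minimal usage sketch (my own illustration, not part of the original answer). The fixed delay is only for demonstration; in real code you would trigger playback from the frameLength == 0 branch instead:

let coordinator = Coordinator()
coordinator.saveAVSpeechUtteranceToFile()

// Give the synthesizer a moment to finish writing before playing back.
DispatchQueue.main.asyncAfter(deadline: .now() + 3.0) {
    coordinator.playFile()
}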
BTW, I uploaded a GitHub demo here.
Finally, here is how to inspect the file contents on an iOS device: in Xcode, open the Window menu -> Devices and Simulators, then download your app's container to copy out its contents.
Related
I'm creating a process to read an existing audio file, add an effect using AVAudioEngine, and then save it as another audio file.
However, with the following method using an AVAudioPlayerNode, the save process must wait until the end of playback.
import UIKit
import AVFoundation

class ViewController: UIViewController {

    let engine = AVAudioEngine()
    let playerNode = AVAudioPlayerNode()
    let reverbNode = AVAudioUnitReverb()

    override func viewDidLoad() {
        super.viewDidLoad()

        do {
            let url = URL(fileURLWithPath: Bundle.main.path(forResource: "original", ofType: "mp3")!)
            let file = try AVAudioFile(forReading: url)

            // playerNode
            engine.attach(playerNode)

            // reverbNode
            reverbNode.loadFactoryPreset(.largeChamber)
            reverbNode.wetDryMix = 5.0
            engine.attach(reverbNode)

            engine.connect(playerNode, to: reverbNode, format: file.processingFormat)
            engine.connect(reverbNode, to: engine.mainMixerNode, format: file.processingFormat)

            playerNode.scheduleFile(file, at: nil, completionCallbackType: .dataPlayedBack) { [self] _ in
                reverbNode.removeTap(onBus: 0)
            }

            // start
            try engine.start()
            playerNode.play()

            let url2 = URL(fileURLWithPath: fileInDocumentsDirectory(filename: "changed.wav"))
            let outputFile = try! AVAudioFile(forWriting: url2, settings: playerNode.outputFormat(forBus: 0).settings)

            reverbNode.installTap(onBus: 0, bufferSize: AVAudioFrameCount(reverbNode.outputFormat(forBus: 0).sampleRate), format: reverbNode.outputFormat(forBus: 0)) { (buffer, when) in
                do {
                    try outputFile.write(from: buffer)
                } catch let error {
                    print(error)
                }
            }
        } catch {
            print(error.localizedDescription)
        }
    }

    func getDocumentsURL() -> NSURL {
        let documentsURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0] as NSURL
        return documentsURL
    }

    func fileInDocumentsDirectory(filename: String) -> String {
        let fileURL = getDocumentsURL().appendingPathComponent(filename)
        return fileURL!.path
    }
}
Is there a way to complete the writing without waiting for playback to finish? Ideally the write would take only as long as the CPU and storage performance require.
It seems that
reverbNode.installTap(...) { (buffer, when) in ... }
in the code is processed in parallel with the current playback position, but I would like to dramatically improve the processing speed.
Best regards.
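One approach worth sketching here is AVAudioEngine's offline manual rendering mode (iOS 11+ / macOS 10.13+), which pulls audio from the engine as fast as the CPU and storage allow instead of in real time, so no tap and no real-time playback are needed. The code below is only an illustrative sketch built around the same player/reverb graph; the function name and URLs are placeholders, and error handling is minimal:

import AVFoundation

// Renders inputURL through a reverb into outputURL without real-time playback.
func renderWithReverbOffline(input inputURL: URL, output outputURL: URL) throws {
    let engine = AVAudioEngine()
    let player = AVAudioPlayerNode()
    let reverb = AVAudioUnitReverb()

    let inputFile = try AVAudioFile(forReading: inputURL)

    engine.attach(player)
    engine.attach(reverb)
    reverb.loadFactoryPreset(.largeChamber)
    reverb.wetDryMix = 5.0
    engine.connect(player, to: reverb, format: inputFile.processingFormat)
    engine.connect(reverb, to: engine.mainMixerNode, format: inputFile.processingFormat)

    // Switch the engine to offline rendering before starting it.
    try engine.enableManualRenderingMode(.offline,
                                         format: inputFile.processingFormat,
                                         maximumFrameCount: 4096)
    try engine.start()
    player.scheduleFile(inputFile, at: nil)
    player.play()

    let outputFile = try AVAudioFile(forWriting: outputURL,
                                     settings: engine.manualRenderingFormat.settings)
    let buffer = AVAudioPCMBuffer(pcmFormat: engine.manualRenderingFormat,
                                  frameCapacity: engine.manualRenderingMaximumFrameCount)!

    // Pull audio from the engine until the whole input file has been rendered.
    while engine.manualRenderingSampleTime < inputFile.length {
        let framesLeft = inputFile.length - engine.manualRenderingSampleTime
        let framesToRender = min(AVAudioFrameCount(framesLeft), buffer.frameCapacity)
        let status = try engine.renderOffline(framesToRender, to: buffer)
        if status == .success {
            try outputFile.write(from: buffer)
        } else {
            // Not expected for this simple player graph, so just stop.
            break
        }
    }

    player.stop()
    engine.stop()
}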
Hello, I am trying to implement a simple audio app using AVAudioEngine which plays a short WAV audio file in a loop at some BPM that can be changed in real time (by a slider or something similar).
Current solution logic:
set bpm = 60
create audioFile from sample.wav
calculate bufferSize: AVAudioFrameCount(audioFile.processingFormat.sampleRate * 60 / Double(bpm))
set bufferSize on audioBuffer
load audioFile into audioBuffer
schedule audioBuffer to play (looping)
This solution works, but the issue is that if I want to change the BPM I need to recreate the buffer with a different bufferSize, so the change is not in real time, since I need to stop the player and reschedule a buffer with the new size.
Any thoughts on how this can be done?
Thanks in advance!
Code (main part):
var bpm: Float = 30
let engine = AVAudioEngine()
var player = AVAudioPlayerNode()
var audioBuffer: AVAudioPCMBuffer?
var audioFile: AVAudioFile?

override func viewDidLoad() {
    super.viewDidLoad()

    audioFile = loadfile(from: "sound.wav")
    audioBuffer = tickBuffer(audioFile: audioFile!)

    engine.attach(player)
    engine.connect(player, to: engine.mainMixerNode, format: audioFile?.processingFormat)

    do {
        engine.prepare()
        try engine.start()
    } catch {
        print(error)
    }
}

private func loadfile(from fileName: String) -> AVAudioFile? {
    let path = Bundle.main.path(forResource: fileName, ofType: nil)!
    let url = URL(fileURLWithPath: path)
    do {
        let audioFile = try AVAudioFile(forReading: url)
        return audioFile
    } catch {
        print("Error loading buffer1 \(error)")
    }
    return nil
}

func tickBuffer(audioFile: AVAudioFile) -> AVAudioPCMBuffer {
    let periodLength = AVAudioFrameCount(audioFile.processingFormat.sampleRate * 60 / Double(bpm))
    let buffer = AVAudioPCMBuffer(pcmFormat: audioFile.processingFormat, frameCapacity: periodLength)!
    try! audioFile.read(into: buffer)
    buffer.frameLength = periodLength
    return buffer
}

func play() {
    guard let audioBuffer = audioBuffer else { return }
    player.scheduleBuffer(audioBuffer, at: nil, options: .loops, completionHandler: nil)
    player.play()
}

func stop() {
    player.stop()
}
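One possible direction (my own hedged sketch, not the original poster's code): when the BPM changes, build a new buffer for the new period and schedule it with the .interruptsAtLoop option, so it replaces the currently looping buffer at its next loop point without stopping the player. This reuses the tickBuffer(audioFile:) helper above; the method name is illustrative.

func update(bpm newBPM: Float) {
    bpm = newBPM
    guard let file = audioFile else { return }
    let newBuffer = tickBuffer(audioFile: file)   // period recomputed from the new bpm
    audioBuffer = newBuffer
    // .interruptsAtLoop swaps in the new buffer at the old buffer's next loop point,
    // so the tempo change takes effect without stopping the player.
    player.scheduleBuffer(newBuffer, at: nil, options: [.loops, .interruptsAtLoop], completionHandler: nil)
}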
I am playing MIDI using AVAudioEngine, AVAudioSequencer, and AVAudioUnitSampler.
The AVAudioUnitSampler loads a SoundFont and the AVAudioSequencer loads a MIDI file.
My initial configuration is
engine = AVAudioEngine()
sampler = AVAudioUnitSampler()
speedControl = AVAudioUnitVarispeed()
pitchControl = AVAudioUnitTimePitch()
engine.attach(sampler)
engine.attach(pitchControl)
engine.attach(speedControl)
engine.connect(sampler, to: speedControl, format: nil)
engine.connect(speedControl, to: pitchControl, format: nil)
engine.connect(pitchControl, to: engine.mainMixerNode, format: nil)
Here is how my sequencer loads the MIDI file:
func setupSequencer() {
    self.sequencer = AVAudioSequencer(audioEngine: self.engine)

    let options = AVMusicSequenceLoadOptions()
    let documentsDirectoryURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!
    let introurl = URL(string: songDetails!.intro!)
    let midiFileURL = documentsDirectoryURL.appendingPathComponent(introurl!.lastPathComponent)

    do {
        try sequencer.load(from: midiFileURL, options: options)
        print("loaded \(midiFileURL)")
    } catch {
        print("something screwed up \(error)")
        return
    }

    sequencer.prepareToPlay()
    if sequencer.isPlaying == false {
    }
}
And here is how the sampler loads the SoundFont:
func loadSF2PresetIntoSampler(_ preset: UInt8, bankURL: URL) {
    do {
        try self.sampler.loadSoundBankInstrument(at: bankURL,
                                                 program: preset,
                                                 bankMSB: UInt8(kAUSampler_DefaultMelodicBankMSB),
                                                 bankLSB: UInt8(kAUSampler_DefaultBankLSB))
    } catch {
        print("error loading sound bank instrument")
    }
}
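For context, a hedged sketch of how these two setup calls might be wired together at startup (the SoundFont file name and preset number are placeholders, not from the original question):

// Load the SoundFont bundled with the app, then set up the sequencer.
if let bankURL = Bundle.main.url(forResource: "GeneralUser", withExtension: "sf2") {
    loadSF2PresetIntoSampler(0, bankURL: bankURL)
}
setupSequencer()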
And it plays fine, no issues with this part. I have two other requirements, and I am having problems with them:
I have to play another MIDI file after the first one finishes. For that I need a completed/finished callback for the MIDI file from either the engine or the sequencer, OR a way to load multiple MIDI files into the sequencer. I have tried many ways, but nothing helped.
I need to show the progress of MIDI playback, i.e. the current time and the total time. For this I tried a method found in a Stack Overflow answer somewhere, which is:
var currentPositionInSeconds: TimeInterval {
    get {
        guard let offsetTime = offsetTime else { return 0 }
        guard let lastRenderTime = engine.outputNode.lastRenderTime else { return 0 }
        let frames = lastRenderTime.sampleTime - offsetTime.sampleTime
        return Double(frames) / offsetTime.sampleRate
    }
}
Here offsetTime is
offsetTime = engine.outputNode.lastRenderTime
And it always returns nil.
Glad to see someone using my example code.
It's a missing feature. Please file a Radar. Nothing will happen without a Radar.
They do pay attention to them when scheduling what to work on.
I fake it by getting the length of the sequence.
if let ft = sequencer.tracks.first {
    self.lengthInSeconds = ft.lengthInSeconds
} else {
    self.lengthInSeconds = 0
}
Then in my play function,
do {
    try sequencer.start()
    Timer.scheduledTimer(withTimeInterval: self.lengthInSeconds, repeats: false) {
        [weak self] (t: Timer) in
        guard let self = self else { return }
        t.invalidate()
        self.logger.debug("sequencer finished")
        self.sequencer.stop()
    }
    ...
You can use a DispatchSourceTimer if you want more accuracy.
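For that extra accuracy, here is a hedged sketch of the DispatchSourceTimer variant (the property and method names are illustrative, not from the original answer):

var finishTimer: DispatchSourceTimer?

func scheduleSequencerFinish(after seconds: TimeInterval) {
    let timer = DispatchSource.makeTimerSource(queue: .main)
    timer.schedule(deadline: .now() + seconds, leeway: .milliseconds(10))
    timer.setEventHandler { [weak self] in
        self?.sequencer.stop()
        self?.finishTimer = nil
    }
    finishTimer = timer   // keep a strong reference, or the source is released before it fires
    timer.resume()
}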
I am using an AVAudioRecorder to record human voice. My code is as follows:
// Property of Class
var recorder: AVAudioRecorder?

func recordButtonTapped() {
    let audioSession = AVAudioSession.sharedInstance()
    do {
        try audioSession.setCategory(AVAudioSessionCategoryRecord)
        try audioSession.setActive(true)
        if audioSession.recordPermission() != .granted {
            audioSession.requestRecordPermission({ (success) in
                self.startRecording()
            })
        } else {
            self.startRecording()
        }
    } catch {
        print("Unable To Set Category")
    }
}

func startRecording() {
    // libraryPathWith(media) just gets the path to the documents directory
    // Like so: Documents/MediaLibrary/Audio/<mediaID>.<mediaExtension>
    if let path = MMFileManager.libraryPathWith(media: self.media) {
        isRecording = true
        do {
            let settings = [
                AVFormatIDKey: Int(kAudioFormatMPEG4AAC),
                AVSampleRateKey: 44100,
                AVNumberOfChannelsKey: 1,
                AVEncoderAudioQualityKey: AVAudioQuality.high.rawValue
            ]
            recorder = try AVAudioRecorder(url: path, settings: settings)
            recorder?.delegate = self
            if recorder!.prepareToRecord() {
                recorder?.record()
            }
        } catch {
            isRecording = false
        }
    }
}

func stopRecording() {
    self.recordingLabel.text = "Recording Complete"
    self.recordingLabel.textColor = UIColor.white
    if let rec = recorder {
        rec.stop()
        recorder = nil
    }
    isRecording = false
}
AVAudioRecorderDelegate
func audioRecorderDidFinishRecording(_ recorder: AVAudioRecorder, successfully flag: Bool) {
    print("RECORDED AUDIO SUCCESSFULLY \(flag)")
}

func audioRecorderEncodeErrorDidOccur(_ recorder: AVAudioRecorder, error: Error?) {
    print("AUDIO RECORDER ERROR \(error?.localizedDescription)")
}
After I call stop on the AVAudioRecorder, the audioRecorderEncodeErrorDidOccur function never gets called; the audioRecorderDidFinishRecording function does get called, but the flag is always false. It prints out "RECORDED AUDIO SUCCESSFULLY false".
QUESTION
When I record using the code above, it does save a file to my documents directory at the specified location, but the file is not something I can play. It writes what amounts to a text file, not an audio file, even though I specify the extension to be .aac.
Why does the AVAudioRecorder not record audio? And how do I get it to do so?
This is how I did it. First, import
AVFoundation
and add AVAudioRecorderDelegate to your ViewController:
class RecordViewController: UIViewController, AVAudioRecorderDelegate
then create a global instance of the AVAudioRecorder:
var audioRecorder : AVAudioRecorder!
Then I created a record button that starts the recording:
@IBAction func playButton(_ sender: Any) {
    let dirPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0] as String
    let recordingName = "voiceRecording.wav"
    let pathArray = [dirPath, recordingName]
    let filePath = URL(string: pathArray.joined(separator: "/"))

    let session = AVAudioSession.sharedInstance()
    try! session.setCategory(AVAudioSessionCategoryPlayAndRecord, with: .defaultToSpeaker)

    try! audioRecorder = AVAudioRecorder(url: filePath!, settings: [:])
    audioRecorder.delegate = self
    audioRecorder.isMeteringEnabled = true
    audioRecorder.prepareToRecord()
    audioRecorder.record()
}
dirPath finds the directory where the recording will be stored.
recordingName sets the name of the actual recorded file.
filePath combines the directory and recordingName into the final location.
The rest is pretty much self-explanatory.
Then create a pause button which is simpler:
@IBAction func pauseButton(_ sender: Any) {
    audioRecorder.stop()
    let audioSession = AVAudioSession.sharedInstance()
    try! audioSession.setActive(false)
}
This should cover how you record the audio.
If you've verified that your URL has the "aac" extension, then I suspect that you simply forgot to call stop() on your recorder. This results in an un-finalized file.
Also print your errors in the catch block.
do {
    try throwingFunc()
} catch {
    print(error)
}
The issue was with a line in the stopRecording() function. Right below the call to stop() the recorder, I was immediately assigning the AVAudioRecorder instance to nil. This deallocates the AVAudioRecorder before the post-processing that creates the finished .aac file can complete.
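In case it helps others, a minimal sketch of the fix described above (using the property and label names from the question): keep the recorder alive until the delegate reports completion, and only release it then.

func stopRecording() {
    recordingLabel.text = "Recording Complete"
    recordingLabel.textColor = UIColor.white
    recorder?.stop()        // let the recorder finalize the file
    isRecording = false
}

func audioRecorderDidFinishRecording(_ recorder: AVAudioRecorder, successfully flag: Bool) {
    print("RECORDED AUDIO SUCCESSFULLY \(flag)")
    self.recorder = nil     // release only after the delegate confirms the file was written
}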
I'd like to play a sound in my app. I found a lot of examples and could also compile and run them with Swift 3, but they are always for iOS. When implementing this code in my app, AVAudioSession remains undefined. What do I have to do? Is there something different for OS X?
import AVFoundation
...

var audioPlayer = AVAudioPlayer()

func PlaySound( ) {
    let alertSound = URL(fileURLWithPath: Bundle.main.path(forResource: "Sound", ofType: "mp3")!)
    do {
        try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayback)
    } catch _ {
    }
    do {
        try AVAudioSession.sharedInstance().setActive(true)
    } catch _ {
    }
    do {
        audioPlayer = try AVAudioPlayer(contentsOf: alertSound)
    } catch _ {
    }
    audioPlayer.prepareToPlay()
    audioPlayer.play()
}
Swift 3 / 4 / 4.2 / 5 - Easiest solution:
NSSound(named: "customSound")?.play()
I've used the following function in conjunction with Swift 3 and a Cocoa project.
import AVFoundation
func playSound(file: String, ext: String) -> Void {
    let url = Bundle.main.url(forResource: file, withExtension: ext)!
    do {
        let player = try AVAudioPlayer(contentsOf: url)
        player.prepareToPlay()
        player.play()
    } catch let error {
        print(error.localizedDescription)
    }
}
// usage //
playSound(file: "sound", ext: "caf") // where the file name is sound.caf.
Make sure that the target membership checkbox is on when you select an audio file in your Xcode project.
A modified version of El Tomato's answer; here's the Swift 4.2 class that I use quite often for sound:
Usage:
/// EXAMPLE 1
// calling this will create a "bop.m4a" player in memory and play it immediately
MakeSound.shared.playSound("bop.m4a")
// calling it a second time has a lot less overhead because the player is already in memory
MakeSound.shared.playSound("bop.m4a")

/// EXAMPLE 2
// simply calls the above many times
MakeSound.shared.playSound(["bop.m4a", "beep.m4a", "bong.m4a"])

// EXAMPLE 3
// registers a shorthand for quick calling
MakeSound.shared.registerSound(fileName: "bop", fileExtension: "m4a", shortHand: "b")
// plays using the shorthand; mostly handy because you define the actual file once
// and can change that one line of code without having to change every reference
MakeSound.shared.playSound("b")
and the class:
class MakeSound {

    private var players = [String: AVAudioPlayer]()
    static var shared = MakeSound()

    func playSound(fileName: String, fileExtension: String) {
        if registerSound(fileName: fileName, fileExtension: fileExtension, shortHand: fileName + "." + fileExtension) {
            playSound(fileName + "." + fileExtension)
        }
    }

    // thought about naming this "playsounds", but I like it with the same name, makes it easier to type
    func playSound(_ shortHands: [String]) {
        for sh in shortHands {
            playSound(sh)
        }
    }

    /// playSound("shorthand") OR playSound("mySound.mp3")
    func playSound(_ shortHand: String) {
        if let player = players[shortHand] {
            player.prepareToPlay()
            player.play()
        } else {
            if shortHand.contains(".") {
                // going to assume that coder will not send "." as a filepath
                // also going to assume that coder will not send "xyz." as filepath
                var comp = shortHand.components(separatedBy: ".")
                let ext = comp.last!
                comp.removeLast()
                if registerSound(fileName: comp.joined(separator: "."), fileExtension: ext, shortHand: shortHand) {
                    playSound(shortHand)
                }
            } else {
                print("Sound request sent to MakeSound, no such shorthand, not a valid filename either.")
            }
        }
    }

    /// registers a sound with MakeSound, and returns a bool based on the success
    @discardableResult func registerSound(fileName: String, fileExtension: String, shortHand: String) -> Bool {
        guard let url = Bundle.main.url(forResource: fileName, withExtension: fileExtension) else {
            print("Unable to register sound: \(shortHand). Filepath not valid.")
            return false
        }
        do {
            let player = try AVAudioPlayer(contentsOf: url)
            players[shortHand] = player
        } catch let error {
            print("Audioplayer \"\(shortHand)\" unable to initialize:\n" + error.localizedDescription)
            return false
        }
        return true
    }
}
Now I will admit that NSSound is by far the easiest and most straightforward approach, yet some of us prefer AVFoundation because we have a bit more control. (And such control is, ironically, removed in my class.)
Even though it is a little old-fashioned, you can simply enter the file name in the Attributes Inspector. All you need to do first is integrate a sound file into your project.