I'm using vDSP_conv to perform autocorrelation. Mostly it works just fine, but every so often it fills the output array with NaNs.
The code:
func corr_test() {
    var pass = 0
    var x = [Float]()
    for i in 0..<2000 {
        x.append(Float(i))
    }
    while true {
        print("pass \(pass)")
        let corr = autocorr(x)
        if corr[1].isNaN {
            print("!!!")
        }
        pass += 1
    }
}
func autocorr(a: [Float]) -> [Float] {
    let resultLen = a.count * 2 + 1
    let padding = [Float].init(count: a.count, repeatedValue: 0.0)
    let a_pad = padding + a + padding
    var result = [Float].init(count: resultLen, repeatedValue: 0.0)
    vDSP_conv(a_pad, 1, a_pad, 1, &result, 1, UInt(resultLen), UInt(a_pad.count))
    return result
}
The output:
pass ...
pass 169
pass 170
pass 171
(lldb) p corr
([Float]) $R0 = 4001 values {
[0] = 2.66466637E+9
[1] = NaN
[2] = NaN
[3] = NaN
[4] = NaN
...
I'm not sure what's going on here. I think I'm handling the zero padding correctly; if I weren't, I don't think I'd be getting correct results 99% of the time.
Ideas? Thanks.
Figured it out. The key was this comment from https://developer.apple.com/library/mac/samplecode/vDSPExamples/Listings/DemonstrateConvolution_c.html :
// “The signal length is padded a bit. This length is not actually passed to the vDSP_conv routine; it is the number of elements
// that the signal array must contain. The SignalLength defined below is used to allocate space, and it is the filter length
// rounded up to a multiple of four elements and added to the result length. The extra elements give the vDSP_conv routine
// leeway to perform vector-load instructions, which load multiple elements even if they are not all used. If the caller did not
// guarantee that memory beyond the values used in the signal array were accessible, a memory access violation might result.”
“Padded a bit.” Thanks for being so specific. Anyway, here's the final working product:
func autocorr(a: [Float]) -> [Float] {
    let filterLen = a.count
    let resultLen = filterLen * 2 - 1
    let signalLen = ((filterLen + 3) & 0xFFFFFFFC) + resultLen

    let padding1 = [Float].init(count: a.count - 1, repeatedValue: 0.0)
    let padding2 = [Float].init(count: (signalLen - padding1.count - a.count), repeatedValue: 0.0)
    let signal = padding1 + a + padding2

    var result = [Float].init(count: resultLen, repeatedValue: 0.0)
    vDSP_conv(signal, 1, a, 1, &result, 1, UInt(resultLen), UInt(filterLen))

    // Remove the first n-1 values, which are just mirrored from the end, so that [0] always has the autocorrelation.
    result.removeFirst(filterLen - 1)
    return result
}
Note that the results here aren't normalized.
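If you do want normalized autocorrelation (lag 0 equal to 1.0), a minimal sketch is to divide every lag by the zero-lag value. This is my illustration, assuming a Swift 5 toolchain; `normalize` is a hypothetical helper, and `vDSP_vsdiv` is the classic vDSP scalar-divide routine:

import Accelerate

// Hypothetical helper: normalize an autocorrelation so corr[0] == 1.0
// by dividing every element by the zero-lag value.
func normalize(_ corr: [Float]) -> [Float] {
    guard var zeroLag = corr.first, zeroLag != 0 else { return corr }
    var out = [Float](repeating: 0, count: corr.count)
    vDSP_vsdiv(corr, 1, &zeroLag, &out, 1, vDSP_Length(corr.count))
    return out
}

// Usage: let normalized = normalize(autocorr(x))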
I'm trying to re-implement Torch's STFT code in Swift with Accelerate / vDSP, to produce a Log Mel Spectrogram by post-processing the STFT, so I can use the Mel Spectrogram as an input for a CoreML port of OpenAI's Whisper.
PyTorch's native STFT / Mel code produces this spectrogram (it's clipped due to importing raw float 32s into Photoshop, lol)
and mine:
Obviously the two things to notice are the values and the lifted frequency components.
The STFT docs here: https://pytorch.org/docs/stable/generated/torch.stft.html
X[ω, m] = sum_{k=0}^{win_length − 1} window[k] · input[m × hop_length + k] · exp(−j · 2π · ω · k / win_length)
I believe I'm properly handling window[k] · input[m × hop_length + k], but I'm a bit lost as to how to calculate the exponent, what −j is referring to in the documentation, and how to convert the final exponential in vDSP. Also, if it's a sum, how do I get the 200 elements I need!?
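(For context: the −j in that formula is just the imaginary unit in engineering notation, and the whole sum is the definition of the DFT, so the forward FFT already evaluates it, one output bin per ω. A minimal sketch of turning one frame's split-complex FFT output into a power spectrum with the classic vDSP call; `powerSpectrum` is my hypothetical helper, not part of the code below:)

import Accelerate

// Hypothetical helper: sampleReal / sampleImaginary as in the code below,
// each numFFT/2 elements long after the real-to-complex FFT.
func powerSpectrum(real: inout [Float], imaginary: inout [Float]) -> [Float] {
    var power = [Float](repeating: 0, count: real.count)
    real.withUnsafeMutableBufferPointer { r in
        imaginary.withUnsafeMutableBufferPointer { i in
            var split = DSPSplitComplex(realp: r.baseAddress!, imagp: i.baseAddress!)
            // |X[ω]|² = re² + im² for every bin ω at once
            vDSP_zvmags(&split, 1, &power, 1, vDSP_Length(power.count))
        }
    }
    return power
}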
My Log Mel Spectrogram
My code follows:
func processData(audio: [Int16]) -> [Float]
{
    assert(self.sampleCount == audio.count)

    var audioFloat: [Float] = [Float](repeating: 0, count: audio.count)

    vDSP.convertElements(of: audio, to: &audioFloat)
    vDSP.divide(audioFloat, 32768.0, result: &audioFloat)

    // Up to this point, Python and Swift are numerically identical

    // Insert numFFT/2 samples before and numFFT/2 after so we have an extra numFFT amount to process
    // TODO: Is this strictly necessary?
    audioFloat.insert(contentsOf: [Float](repeating: 0, count: self.numFFT/2), at: 0)
    audioFloat.append(contentsOf: [Float](repeating: 0, count: self.numFFT/2))

    // Split-complex arrays holding the FFT results
    var allSampleReal = [[Float]](repeating: [Float](repeating: 0, count: self.numFFT/2), count: self.melSampleCount)
    var allSampleImaginary = [[Float]](repeating: [Float](repeating: 0, count: self.numFFT/2), count: self.melSampleCount)

    // Step 2 - we need to create a 200 x 3000 matrix of STFTs - note we appear to want to output complex numbers (?)
    for m in 0 ..< self.melSampleCount
    {
        // Slice numFFT samples every hop count (barf) and make a mel spectrum out of it
        // audioFrame ends up holding split-complex numbers
        var audioFrame = Array<Float>(audioFloat[(m * self.hopCount) ..< ((m * self.hopCount) + self.numFFT)])

        // Copy of audioFrame's original samples
        let audioFrameOriginal = audioFrame

        assert(audioFrame.count == self.numFFT)

        // Split-complex arrays holding a single FFT result of our audio frame, which get appended to the allSample split-complex arrays
        var sampleReal: [Float] = [Float](repeating: 0, count: self.numFFT/2)
        var sampleImaginary: [Float] = [Float](repeating: 0, count: self.numFFT/2)

        sampleReal.withUnsafeMutableBytes { unsafeReal in
            sampleImaginary.withUnsafeMutableBytes { unsafeImaginary in

                vDSP.multiply(audioFrame,
                              hanningWindow,
                              result: &audioFrame)

                var complexSignal = DSPSplitComplex(realp: unsafeReal.bindMemory(to: Float.self).baseAddress!,
                                                    imagp: unsafeImaginary.bindMemory(to: Float.self).baseAddress!)

                audioFrame.withUnsafeBytes { unsafeAudioBytes in
                    vDSP.convert(interleavedComplexVector: [DSPComplex](unsafeAudioBytes.bindMemory(to: DSPComplex.self)),
                                 toSplitComplexVector: &complexSignal)
                }

                // Step 3 - creating the FFT
                self.fft.forward(input: complexSignal, output: &complexSignal)
            }
        }

        // We need to match: https://pytorch.org/docs/stable/generated/torch.stft.html
        // At this point, I'm unsure how to continue?

        // let twoπ = Float.pi * 2
        // let freqstep: Float = Float(16000 / (self.numFFT/2))
        //
        // var w: Float = 0.0
        // for k in 0 ..< self.numFFT/2
        // {
        //     let j: Float = sampleImaginary[k]
        //     let sample = audioFrame[k]
        //
        //     let exponent = -j * ((twoπ * freqstep * Float(k)) / Float(self.numFFT/2))
        //
        //     w += powf(sample, exponent)
        // }

        allSampleReal[m] = sampleReal
        allSampleImaginary[m] = sampleImaginary
    }

    // We now have allSample split-complex arrays holding 3000 200-dimensional real and imaginary FFT results
    // We create flattened 3000 x 200 arrays of DSPSplitComplex values
    var flattenedReal: [Float] = allSampleReal.flatMap { $0 }
    var flattenedImaginary: [Float] = allSampleImaginary.flatMap { $0 }
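(As for the Log Mel post-processing the question is aiming at: once you have a power spectrum per frame, the mel step is a matrix multiply by a mel filterbank followed by a log. A hedged sketch, where `melFilters`, `logMel`, and the dimension parameters are all my hypothetical names, not from the question's code:)

import Accelerate

// Hedged sketch of the Log Mel step. `melFilters` is a hypothetical
// nMels x nBins filterbank (row-major); `power` is nBins x nFrames
// power-spectrum values.
func logMel(melFilters: [Float], power: [Float],
            nMels: Int, nBins: Int, nFrames: Int) -> [Float] {
    var mel = [Float](repeating: 0, count: nMels * nFrames)
    // (nMels x nBins) * (nBins x nFrames) -> (nMels x nFrames)
    vDSP_mmul(melFilters, 1, power, 1, &mel, 1,
              vDSP_Length(nMels), vDSP_Length(nFrames), vDSP_Length(nBins))
    // log10 of every element, with a small floor to avoid log(0)
    var floored = vDSP.clip(mel, to: 1e-10...Float.greatestFiniteMagnitude)
    var result = [Float](repeating: 0, count: floored.count)
    var count = Int32(floored.count)
    vvlog10f(&result, &floored, &count)
    return result
}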
I am writing some linear algebra algorithms using Apple's Swift / Accelerate framework. Everything works, and the solved Ax = b equations produce the right results (this code is from the Apple examples).
I would like to be able to extract the LLT factorisation from the
SparseOpaqueFactorization_Double
object, but there doesn't seem to be any way to extract (to print) the factorisation. Does anyone know of a way of extracting the factorised matrix from the SparseOpaqueFactorization_Double object?
import Foundation
import Accelerate

print("Hello, World!")

// Example of a symmetric sparse matrix; empty cells represent zeros.
var rowIndices: [Int32] = [0, 1, 3,    // Column 0
                           1, 2, 3,    // Column 1
                           2,          // Column 2
                           3]          // Column 3

// Note that the matrix representation here is the lower triangle.
// Since the matrix is symmetric, there is no need to store the
// upper triangle as well.
var values: [Double] = [10.0, 1.0, 2.5,    // Column 0
                        12.0, -0.3, 1.1,   // Column 1
                        9.5,               // Column 2
                        6.0]               // Column 3

var columnStarts = [0,    // Column 0
                    3,    // Column 1
                    6,    // Column 2
                    7,    // Column 3
                    8]

var attributes = SparseAttributes_t()
attributes.triangle = SparseLowerTriangle
attributes.kind = SparseSymmetric

let structure = SparseMatrixStructure(rowCount: 4,
                                      columnCount: 4,
                                      columnStarts: &columnStarts,
                                      rowIndices: &rowIndices,
                                      attributes: attributes,
                                      blockSize: 1)

let llt: SparseOpaqueFactorization_Double = values.withUnsafeMutableBufferPointer { valuesPtr in
    let a = SparseMatrix_Double(
        structure: structure,
        data: valuesPtr.baseAddress!
    )
    return SparseFactor(SparseFactorizationCholesky, a)
}

var bValues = [2.20, 2.85, 2.79, 2.87]
var xValues = [0.00, 0.00, 0.00, 0.00]

bValues.withUnsafeMutableBufferPointer { bPtr in
    xValues.withUnsafeMutableBufferPointer { xPtr in
        let b = DenseVector_Double(
            count: 4,
            data: bPtr.baseAddress!
        )
        let x = DenseVector_Double(
            count: 4,
            data: xPtr.baseAddress!
        )
        SparseSolve(llt, b, x)
    }
}

for val in xValues {
    print("x = " + String(format: "%.2f", val), terminator: " ")
}
print("")
print("Success")
OK, so after much sleuthing around the Apple Swift headers, I have solved this problem.
There is an Accelerate API call:
public func SparseCreateSubfactor(_ subfactor: SparseSubfactor_t, _ Factor: SparseOpaqueFactorization_Double) -> SparseOpaqueSubfactor_Double
which returns a SparseOpaqueSubfactor_Double. This can be used in a matrix multiplication to produce a "transparent" result (i.e. a matrix you can use/print/see). So I multiplied the subfactor for the lower triangular part of the Cholesky factorisation by the identity matrix to extract the factors. Works a treat!
let subfactors = SparseCreateSubfactor(SparseSubfactorL, llt)
var identValues = generateIdentity(n)
ppm(identValues) // ppm is the author's pretty-print helper (not shown in the post)

let sparseAs = SparseAttributes_t(transpose: false,
                                  triangle: SparseUpperTriangle,
                                  kind: SparseOrdinary,
                                  _reserved: 0,
                                  _allocatedBySparse: false)

let identity_m = DenseMatrix_Double(rowCount: Int32(n),
                                    columnCount: Int32(n),
                                    columnStride: Int32(n),
                                    attributes: sparseAs,
                                    data: &identValues)

SparseMultiply(subfactors, identity_m) // Output is in identity_m after the call
I wrote a small function to generate an identity matrix which I've used in the code above:
func generateIdentity(_ dimension: Int) -> [Double] {
    var iden = Array<Double>()
    for i in 0...dimension - 1 {
        for j in 0...dimension - 1 {
            if i == j {
                iden.append(1.0)
            } else {
                iden.append(0.0)
            }
        }
    }
    return iden
}
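For reference, since `ppm` isn't shown in the post, a minimal hypothetical stand-in might look like the sketch below. It assumes the column-major layout used by DenseMatrix_Double above, and takes the dimension explicitly (the original presumably captured n from the surrounding scope):

// Hypothetical stand-in for the `ppm` (pretty-print matrix) helper used above.
// Assumes column-major storage with column stride n.
func ppm(_ values: [Double], n: Int) {
    for row in 0..<n {
        let line = (0..<n)
            .map { col in String(format: "%8.3f", values[col * n + row]) }
            .joined(separator: " ")
        print(line)
    }
}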
I want to add the numbers together and print the sum of every 4 elements, however I cannot wrap my head around using the stride function. If I am using the wrong approach, please explain a better method.
var numbers = [1,2,3,4,5,6,7,8,9,10,11,12,13]
func addNumbersByStride(){
    var output = Stride...
    // first output = 1+2+3+4 = 10
    // second output = 5+6+7+8 = 26 and so on
    print(output)
}
It seems you would like to use stride ...
let arr = [1,2,3,4,5,6,7,8,9,10,11,12,13]
let by = 4
let i = stride(from: arr.startIndex, to: arr.endIndex, by: by)
var j = i.makeIterator()
while let n = j.next() {
    let e = min(n.advanced(by: by), arr.endIndex)
    let sum = arr[n..<e].reduce(0, +)
    print("sum of arr[\(n)..<\(e)]", sum)
}
prints
sum of arr[0..<4] 10
sum of arr[4..<8] 26
sum of arr[8..<12] 42
sum of arr[12..<13] 13
You can first split the array into chunks, and then add the chunks up:
extension Array {
    // Split array into chunks of n
    func chunked(into size: Int) -> [[Element]] {
        return stride(from: 0, to: count, by: size).map {
            Array(self[$0 ..< Swift.min($0 + size, count)])
        }
    }
}
// add each chunk up:
let results = numbers.chunked(into: 4).map { $0.reduce(0, +) }
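For the example array, numbers.chunked(into: 4) yields [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13]], so results is [10, 26, 42, 13].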
If you would like to discard the last sum if the length of the original array is not divisible by 4, you can add an if statement like this:
let results: [Int]
if numbers.count % 4 != 0 {
    results = Array(numbers.chunked(into: 4).map { $0.reduce(0, +) }.dropLast())
} else {
    results = numbers.chunked(into: 4).map { $0.reduce(0, +) }
}
This is quite a basic solution, and maybe not so elegant. First, calculate and print the sum of every group of 4 elements:
var sum = 0
var count = 0
for n in stride(from: 4, to: numbers.count, by: 4) {
    sum = 0
    for i in n-4..<n {
        sum += numbers[i]
    }
    count = n
    print(sum)
}
Then calculate the sum of the remaining elements
sum = 0
for n in count..<numbers.count {
    sum += numbers[n]
}
print(sum)
I'm trying to implement a solution using the backtracking algorithm.
I have some weights [1, 2, 7, 10, 20, 70, 100, 200, 700, ...] and I want to return the weights that sum to a given input.
For example, input => 12 should return [2, 10].
For example, input => 8 should return [1, 7].
My code doesn't seem to work well. It works only for some input numbers, like 13 or 8.
for targetValue in [13] {
    var currentValue = 0
    var usedWeights: [Int] = []

    for weight in weights {
        if targetValue > weight {
            currentValue += weight
            usedWeights.append(weight)
        } else if weight > targetValue {
            let rememberLast = usedWeights.last ?? 0
            usedWeights.remove(at: usedWeights.count-1)
            currentValue -= rememberLast
            if currentValue > targetValue || currentValue < targetValue {
                let last = usedWeights.remove(at: usedWeights.count-1)
                currentValue -= last
                usedWeights.append(rememberLast)
                currentValue -= rememberLast
                print(usedWeights) // [1, 2, 10] Yeah it works :) but only for some numbers ..:(
            }
        }
    }
}
The used weights should be unique. I have some trouble finding the weights.
This is how the algorithm works:
Input => 13
1
1+2
1+2+7
1+2+7+10 // currentValue is now 20
1+2+7    // still no solution: take back the last removed element and remove the current last element
1+2+10   // correct weights
I hope you can help me and explain what I'm doing wrong.
Here's one solution. Iterate in reverse through the weights. If the weight is less than or equal to the current total, use the weight.
let weights = [1,2,7,10,20,70,100,200,700] // the weights you have
let needed = 12                            // The total weight you want
var total = needed                         // The current working total
var needs = [Int]()                        // The resulting weights needed
for weight in weights.reversed() {
    if weight <= total {
        needs.append(weight)
        total -= weight
    }
}
if total == 0 {
    print("Need \(needs) for \(needed)")
} else {
    print("No match for \(needed)")
}
I don't know how you set the weights.
But consider:
[2, 3, 6, 10, 20]
and needed = 21.
Then the algorithm will not find it (no match), when there is obviously a solution: 2 + 3 + 6 + 10.
So you should call the search algorithm recursively when it fails, after removing from the weights the first one you picked.
This is not very clean, but it seems to work (in code; some issue in a playground):
func searchIt(weightsSearch: [Int], neededSearch: Int) -> (needs: [Int], remainingWeights: [Int], found: Bool) {
    var total = neededSearch // The current working total
    var needs = [Int]()      // The resulting weights needed
    var remaining = weightsSearch
    var firstNotYetSelected = true
    var position = weightsSearch.count - 1

    for weight in weightsSearch.reversed() {
        if weight <= total {
            needs.append(weight)
            total -= weight
            if firstNotYetSelected {
                remaining.remove(at: position)
            }
            firstNotYetSelected = false
        }
        // Decrement on every iteration so `position` tracks the
        // index of the current weight, selected or not.
        position -= 1
    }
    return (needs, remaining, total == 0)
}
var needs = [Int]() // The resulting weights needed
var remainingWeights = weights
var foundIt: Bool

repeat {
    (needs, remainingWeights, foundIt) = searchIt(weightsSearch: remainingWeights, neededSearch: needed)
    if foundIt {
        print("Need \(needs) for \(needed)")
        break
    } else {
        print("No match yet for \(needed)")
    }
} while remainingWeights.count >= 1
with the test case
let weights = [2,3,6,10,20]
let needed = 21
we get
No match yet for 21
Need [10, 6, 3, 2] for 21
If you want ALL the solutions, replace the break statement with continue.
With the test case
let weights = [2,3,6,10,15,20]
let needed = 21
we get the 2 solutions
No match yet for 21
No match yet for 21
Need [15, 6] for 21
Need [10, 6, 3, 2] for 21
No match yet for 21
No match yet for 21
No match yet for 21
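For comparison, here is a compact recursive backtracking sketch that enumerates every subset summing to the target. It is my alternative illustration, not taken from either answer above, and it sidesteps the index bookkeeping of the iterative version:

// A minimal recursive backtracking sketch. Enumerates all unique-weight
// combinations that sum exactly to `target`.
func combinations(of weights: [Int], summingTo target: Int,
                  startingAt start: Int = 0, current: [Int] = []) -> [[Int]] {
    if target == 0 { return [current] } // exact match: one solution
    var solutions: [[Int]] = []
    for i in start..<weights.count where weights[i] <= target {
        // Choose weights[i], then recurse on the weights after it
        solutions += combinations(of: weights, summingTo: target - weights[i],
                                  startingAt: i + 1, current: current + [weights[i]])
    }
    return solutions
}

// combinations(of: [2, 3, 6, 10, 15, 20], summingTo: 21)
// -> [[2, 3, 6, 10], [6, 15]]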
I am trying to solve the second problem on Project Euler. The problem is as follows:
Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
I think I've written a solution, but when I try to run my code it crashes my Swift playground and gives me this error message:
Playground execution aborted: Execution was interrupted, reason: EXC_BAD_INSTRUCTION (code=EXC_I386_INVOP, subcode=0x0)
var prev = 0
var next = 1
var num = 0
var sum = 0

for var i = 1; i < 400; i++ {
    num = prev + next
    if next % 2 == 0 {
        sum += next
    }
    prev = next
    next = num
}
print(sum)
The weird thing is, if I set the counter on my loop to less than 93, it works fine. Explicitly typing the variables as Double does not help. Anyone know what's going on here?
There is nothing weird about this at all. Do you know how large the 400th Fibonacci number is?
176023680645013966468226945392411250770384383304492191886725992896575345044216019675
Swift's Int64 or UInt64 simply cannot handle a number that large. The latter can go up to 18446744073709551615 at most - not even close.
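If you want to see exactly where the trap happens, here is a small sketch (my illustration, not from the original answer) using Swift's overflow-reporting addition:

// Walk the Fibonacci sequence with UInt64 until addition would overflow.
var prev: UInt64 = 0
var next: UInt64 = 1
var step = 0
while true {
    let (value, didOverflow) = prev.addingReportingOverflow(next)
    if didOverflow {
        print("overflow after step \(step), largest term: \(next)")
        break
    }
    (prev, next) = (next, value)
    step += 1
}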
If you change your variables to be Doubles, it works but will be inaccurate:
var prev : Double = 0
var next : Double = 1
var num : Double = 0
var sum : Double = 0
will yield
2.84812298108489e+83
which is kind of close to the actual value of
1.76e+83
Luckily you do not need to get values that big. I would recommend not writing a for loop but a while loop that calculates the next Fibonacci number until the break condition is met: "whose values do not exceed four million".
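A minimal sketch of that suggestion (my illustration of the answer's advice, starting from 1 and 2 as in the problem statement):

// Sum the even-valued Fibonacci terms that do not exceed four million.
var prev = 1
var next = 2
var sum = 0
while next <= 4_000_000 {
    if next % 2 == 0 {
        sum += next
    }
    (prev, next) = (next, prev + next)
}
print(sum) // 4613732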
The Fibonacci numbers become very large quickly. To compute large Fibonacci numbers, you need to implement some kind of BigNum. Here is a version that makes a BigNum implemented internally as an array of digits. For example, 12345 is implemented internally as [1, 2, 3, 4, 5]. This makes it easy to represent arbitrarily large numbers.
Addition is implemented by making the two arrays the same size, then using map to add the elements pairwise; finally, the carryAll function restores the array to single digits.
For example 12345 + 67:
[1, 2, 3, 4, 5] + [6, 7] // numbers represented as arrays
[1, 2, 3, 4, 5] + [0, 0, 0, 6, 7] // pad the shorter array with 0's
[1, 2, 3, 10, 12] // add the arrays element-wise
[1, 2, 4, 1, 2] // perform carry operation
Here is the implementation of BigNum. It is also CustomStringConvertible which makes it possible to print the result as a String.
struct BigNum: CustomStringConvertible {
    var arr = [Int]()

    // Return BigNum value as a String so it can be printed
    var description: String { return arr.map(String.init).joined() }

    init(_ arr: [Int]) {
        self.arr = carryAll(arr)
    }

    // Allow BigNum to be initialized with an `Int`
    init(_ i: Int = 0) {
        self.init([i])
    }

    // Perform the carry operation to restore the array to single digits
    func carryAll(_ arr: [Int]) -> [Int] {
        var result = [Int]()

        var carry = 0
        for val in arr.reversed() {
            let total = val + carry
            let digit = total % 10
            carry = total / 10
            result.append(digit)
        }

        while carry > 0 {
            let digit = carry % 10
            carry = carry / 10
            result.append(digit)
        }

        return result.reversed()
    }

    // Enable two BigNums to be added with +
    static func +(_ lhs: BigNum, _ rhs: BigNum) -> BigNum {
        var arr1 = lhs.arr
        var arr2 = rhs.arr

        let diff = arr1.count - arr2.count

        // Pad the arrays to the same length
        if diff < 0 {
            arr1 = Array(repeating: 0, count: -diff) + arr1
        } else if diff > 0 {
            arr2 = Array(repeating: 0, count: diff) + arr2
        }

        return BigNum(zip(arr1, arr2).map { $0 + $1 })
    }
}
// This function is based upon this question:
// https://stackoverflow.com/q/52975875/1630618
func fibonacci(to n: Int) {
    guard n >= 2 else { return }
    var array = [BigNum(0), BigNum(1)]
    for i in 2...n {
        array.append(BigNum())
        array[i] = array[i - 1] + array[i - 2]
        print(array[i])
    }
}

fibonacci(to: 400)
Output:
1
2
3
5
8
...
67235063181538321178464953103361505925388677826679492786974790147181418684399715449
108788617463475645289761992289049744844995705477812699099751202749393926359816304226
176023680645013966468226945392411250770384383304492191886725992896575345044216019675