Swift closure compiling failed with expression too complex - swift

// Original version: computes the quintic ease-out value by invoking an inline
// closure with (t / d - 1.0). The closure literal forces the type checker to
// solve the whole chained polynomial expression at once, which is what
// triggers the "expression too complex" compiler error described above.
public static func easeOutQuint(_ t: Float, _ b: Float, _ c: Float, _ d: Float = 1.0) -> Float {
return {
return c * ($0 * $0 * $0 * $0 * $0 + 1.0) + b
}(t / d - 1.0)
}
I'm not familiar with closure, so I can not fix it by myself, can someone help me?

Closures are a great tool but, in this particular example, you would be better off without them IMO...
For instance, you could rewrite your function simply as:
// Quintic ease-out: c * ((t/d - 1)^5 + 1) + b.
// Naming the intermediate keeps each expression flat and trivially
// type-checkable, avoiding the "too complex" diagnostic entirely.
public static func easeOutQuint(_ t: Float, _ b: Float, _ c: Float, _ d: Float = 1.0) -> Float {
let shifted = t / d - 1.0
return c * (pow(shifted, 5) + 1) + b
}
And, by the way, this should compile just fine in any Swift compiler you come across ;)

Related

Generic with default value of nil

I'm attempting to create a generic class/struct initializer that converts any value it takes in and assigns it to its Double properties. (Note: the as! or as? pattern below is just being used to save space here. I'm actually using this extension to convert my values.)
The idea here is that I'd like to be able to mix Integers, Doubles, CGFloats, etc (i.e. anything in the Numeric generic type constraint) and have them all end up as Doubles. It works in other situations, but I'm running into an issue here.
In the pared down example below I'm creating a Vector class and the z property should be able to be nil in case we're only dealing with 2 dimensions.
// Question's code: `Z` cannot be inferred when the z argument is omitted —
// a defaulted `_ z: Z? = nil` still needs a concrete Z at the call site.
struct Vector {
var x: Double
var y: Double
var z: Double?
public init<X: Numeric, Y: Numeric, Z: Numeric>(_ x: X, _ y: Y, _ z: Z? = nil){
// NOTE(review): `as!`/`as?` are stated placeholders (see the question text);
// `4 as! Double` would actually trap at runtime since Int doesn't cast to Double.
self.x = x as! Double
self.y = y as! Double
self.z = z as? Double
}
}
var myVector = Vector(4, 3.4)
print("""
\(myVector.x)
\(myVector.y)
\(myVector.z)
"""
)
The problem is that I get an error that says Generic parameter 'Z' could not be inferred.
I would do this with two separate intitializers, one of which doesn't need to have an unused generic Z:
// Storage type: z is optional so a 2-dimensional vector can simply omit it.
struct Vector {
var x: Double
var y: Double
var z: Double?
}
extension Vector {
// Two-argument form: no generic Z at all, so `Vector(4, 3.4)` infers cleanly.
public init<X: Numeric, Y: Numeric>(_ x: X, _ y: Y) {
// NOTE(review): `as!` is the question's stand-in for a real conversion;
// an Int passed here would trap because Int is not castable to Double.
self.init(x: x as! Double, y: y as! Double, z: nil as Double?)
}
// Three-argument form keeps generic Z for callers that do pass a z value.
public init<X: Numeric, Y: Numeric, Z: Numeric>(_ x: X, _ y: Y, _ z: Z? = nil) {
self.init(x: x as! Double, y: y as! Double, z: z as? Double)
}
}

Swift function that swaps two values

I need to write a function that swaps the contents of any two variables. This is what I have currently but I am getting the error "Cannot assign to value: 'a' is a 'let' constant".
// Question's code: function parameters are immutable copies (implicitly
// `let`), so assigning to them is a compile error.
func swap<T>(a: T, b: T) {
(a, b) = (b, a);
}
Can somebody please explain why this doesn't work and suggest how to fix it?
Thank you.
You need to make the parameters inout to be able to do that, like this:
// Generic in-place swap: `inout` parameters write back through the caller's
// variables, which is what makes the exchange observable outside the function.
func swap<T>(_ a: inout T,_ b: inout T) {
let held = a
a = b
b = held
}
var a = 0, b = 1
swap(&a, &b)
The parameters inside your function are immutable, so they cannot be swapped — just as if you tried to swap two values declared with let:
let x = 5
let y = 6
//if you try to swap these values Xcode will tell you to change their declaration to var
Here it is almost the same: to be able to change the values, you have to declare the parameters inout, as below:
// Exchanges the caller's two variables of any single type T.
// The `inout` marking is what lets the writes escape the function's scope.
func swapTwoValues<T>(_ a: inout T, _ b: inout T) {
let original = a
a = b
b = original
}
// call it like this
var x = 1
var y = 2
swapTwoValues(&x, &y)
// Note that you cannot call it like this: swapTwoValues(&1, &2) — inout
// arguments must be mutable storage, not literals.
That is because the arguments of functions are constants and are treated as local properties, unless you mark them inout and then they are treated as sort of "pass-through" properties that aren't confined to the scope of the function.
var kiwi = "kiwi"
var mango = "mango"
// Labels are underscored so the call below invokes THIS function; with the
// original `swap(a:b:)` signature, `swap(&kiwi, &mango)` silently resolved
// to the standard library's Swift.swap(_:_:) and never ran the shown code.
func swap<T>(_ a: inout T, _ b: inout T) {
(a, b) = (b, a)
}
swap(&kiwi, &mango)
If having to put & everywhere or the verbosity of the function annoy you, you could always use a descriptive operator like this:
// A swap operator: reads as "exchange". AssignmentPrecedence makes it parse
// like the other mutating operators (=, +=, ...).
infix operator <->: AssignmentPrecedence
func <-> <A>(lhs: inout A, rhs: inout A) {
let stash = lhs
lhs = rhs
rhs = stash
}
var kenobi = "Hello there!"
var helloThere = "Kenobi..."
helloThere <-> kenobi
print(helloThere, kenobi, separator: "\n")
// Hello there!
// Kenobi...
Which I feel looks much nicer. You don't need to use & in operators with inout parameters – that's how the += and -= operators work:
// Illustration: += and -= are themselves declared with an `inout` left-hand
// side, yet callers never write `&` — operators take their inout operands
// implicitly, unlike ordinary function calls.
public extension AdditiveArithmetic {
static func +=(lhs: inout Self, rhs: Self) {
lhs = lhs + rhs
}
static func -=(lhs: inout Self, rhs: Self) {
lhs = lhs - rhs
}
}

Apply void function on array

I am learning swift right now and wanted to write a quicksort algorithm for [Int]
// Question's code: returns a NEW array with elements x and y exchanged.
// The input is never modified, so callers must use the return value —
// which the question's other functions fail to do.
func swap(array:[Int], x:Int, y:Int)->[Int] { //swap elements in array
var result:[Int] = array
result[y] = array[x]
result[x] = array[y]
return result
}
// Question's code — BUG (addressed by the answer below): every `swap` call
// returns a fresh array that is immediately discarded, and the `array`
// parameter is an immutable copy, so nothing here actually reorders anything.
func split(array:[Int], u:Int, o:Int , p:Int)->Int { //where the sorting happens
let pivot:Int = array[p]
swap(array: array, x: p, y: o)
var pn:Int = u
for j in u...o {
if(array[j] <= pivot) {
swap(array: array, x: pn, y: j)
pn = pn+1
}
}
swap(array: array, x: pn, y: o);
return pn;
}
// Question's code: recursion passes value copies of the array down, so any
// partitioning done by `split` is lost between calls.
func quickSortR(array:[Int],u:Int ,o:Int) { //recursive call
if(u < o){
let p:Int = o
let pn:Int = split(array: array, u: u, o: o, p: p)
quickSortR(array: array,u: u,o: pn-1)
quickSortR(array: array,u: pn+1,o: o)
}
}
// Question's code: returns Void and sorts only a local copy, so there is no
// observable result — the problem the question is asking about.
func quickSort(array:[Int]) { //initial call
quickSortR(array: array, u: 0, o: array.count-1)
}
My problem is that I don't know how to apply this implementation on an array.
For example if I got the array a:[Int] = [3,1,2].
I can't check if the implementation is working by print(quickSort(a)), because the return type of quickSort is void.
Of course I can't apply quickSort on that array like a.quickSort(a)
I really don't want to change my implementation of the algorithm by much if it isn't the cause of that problem (e.g. just the signature or return type)
Just improve your syntax:
// Swaps the elements at indices x and y of `array` in place.
// `inout` means the caller's array itself is mutated.
func swap(array: inout [Int], x:Int, y:Int) { //swap elements in array
array.swapAt(x, y)
}
// Lomuto-style partition around the pivot at index p.
// Moves the pivot to position o, sweeps [u, o) so elements <= pivot come
// first, then places the pivot at its final index pn and returns pn.
// NOTE(review): the stray debug `print(ar, u, o, p)` in the original
// referenced `ar`, a variable that does not exist in this scope (it was the
// caller's array name), so it could not compile — removed.
func split(array: inout [Int], u:Int, o:Int , p:Int) -> Int { //where the sorting happens
let pivot:Int = array[p]
swap(array: &array, x: p, y: o)
var pn:Int = u
for j in u..<o {
if(array[j] <= pivot) {
swap(array: &array, x: pn, y: j)
pn = pn+1
}
}
swap(array: &array, x: pn, y: o);
return pn;
}
// Recursively quicksorts array[u...o] in place, partitioning on the last
// element and recursing into both halves around the pivot's final position.
func quickSortR(array: inout [Int],u:Int ,o:Int) { //recursive call
guard u < o else { return }
let pivotIndex = o
let pn = split(array: &array, u: u, o: o, p: pivotIndex)
quickSortR(array: &array, u: u, o: pn - 1)
quickSortR(array: &array, u: pn + 1, o: o)
}
// Entry point: quicksorts the caller's whole array in place.
// An empty array gives o = -1, so the recursion's u < o guard exits at once.
func quickSort(array: inout [Int]) { //initial call
quickSortR(array: &array, u: array.startIndex, o: array.count - 1)
}
If you use func swap(array: [Int]), the array is immutable inside the function — it is just a copy. Using inout resolves this problem.
To check the code, use something like this:
var ar = [1]
quickSort(array: &ar)
print(ar)

How to properly support Int values in CGFloat math in Swift?

Goal
I (like many others on the web) would like to use Int variables and literals in CGFloat math since readability & ease of development outweigh a possible loss in precision by far. This is most noticeable when you use manual layout throughout an app instead of using the Storyboard.
So the following should work without any manual CGFloat casts:
// Desired behavior: every mixed CGFloat/Int expression should compile, while
// Int-only expressions keep producing Int. Trailing comments show what the
// compiler actually does for each case today.
let a = CGFloat(1)
let b = Int(2)
let c = a / b // Cannot invoke / with an arguments list of type (CGFloat, Int)
let d = b / a // Cannot invoke / with an arguments list of type (Int, CGFloat)
let e = a / 2 // => CGFloat(0.5)
let f = 2 / a // => CGFloat(2.0)
let g = 2 / b // => Int(1)
let h = b / 2 // => Int(1)
let i = 2 / 2 // => Int(1)
let j: CGFloat = a / b // Cannot invoke / with an arguments list of type (CGFloat, Int)
let k: CGFloat = b / a // Cannot invoke / with an arguments list of type (Int, CGFloat)
let l: CGFloat = a / 2 // => CGFloat(0.5)
let m: CGFloat = 2 / a // => CGFloat(2.0)
let n: CGFloat = 2 / b // Cannot invoke / with an arguments list of type (IntegerLiteralConvertible, Int)
let o: CGFloat = b / 2 // Cannot invoke / with an arguments list of type (Int, IntegerLiteralConvertible)
let p: CGFloat = 2 / 2 // => CGFloat(1.0)
Approach
Since we cannot add implicit conversions to Swift types I had to add appropriate operators which take CGFloat and Int.
// Mixed-type division overloads: promote the Int operand to CGFloat.
// These compile, but when BOTH operands are integer literals the compiler
// cannot choose which operand to promote — see the discussion below.
func / (a: CGFloat, b: Int) -> CGFloat { return a / CGFloat(b) }
func / (a: Int, b: CGFloat) -> CGFloat { return CGFloat(a) / b }
Problem
The two operators become ambiguous when Swift tries to implicitly create CGFloat values from integer literals. It doesn't know which of the two operands to convert (example case p).
// Results after adding the two mixed-type `/` overloads above: every case now
// compiles except p, where both operands are bare integer literals.
let a = CGFloat(1)
let b = Int(2)
let c = a / b // => CGFloat(0.5)
let d = b / a // => CGFloat(2.0)
let e = a / 2 // => CGFloat(0.5)
let f = 2 / a // => CGFloat(2.0)
let g = 2 / b // => Int(1)
let h = b / 2 // => Int(1)
let i = 2 / 2 // => Int(1)
let j: CGFloat = a / b // => CGFloat(0.5)
let k: CGFloat = b / a // => CGFloat(2.0)
let l: CGFloat = a / 2 // => CGFloat(0.5)
let m: CGFloat = 2 / a // => CGFloat(2.0)
let n: CGFloat = 2 / b // => CGFloat(1.0)
let o: CGFloat = b / 2 // => CGFloat(1.0)
let p: CGFloat = 2 / 2 // Ambiguous use of operator /
Question
Is there any way to declare the operators in a way where there is no ambiguous use and all test cases succeed?
For starters, tl;dr: NO.
The problem is that we're asking the compiler to do too much implicitly.
I'm going to use regular functions for this answer, because I want it to be clear that this has nothing to do with operators.
So, we need the following 4 functions:
// Four overloads covering every (Int|Float, Int|Float) argument combination.
// The constant return values are arbitrary tags so you can tell which
// overload the compiler chose.
func foo(a: Int, b: Int) -> Int {
return 1
}
func foo(a: Int, b: Float) -> Float {
return 2.0
}
func foo(a: Float, b: Int) -> Float {
return 3.0
}
func foo(a: Float, b: Float) -> Float {
return 4.0
}
And now we're in the same scenario. I'm going to ignore all of the ones that work and focus on these two scenarios:
let bar1 = foo(1,2)
let bar2: Float = foo(1,2)
In the first scenario, we're asking Swift to implicitly determine just one thing: What type should bar1 be? The two arguments we pass to foo are 1, which is of type IntegerLiteralConvertible, and 2, which is again of the type IntegerLiteralConvertible.
Because there is only one override for foo which takes two Int arguments, Swift is able to figure out what type bar1 should be, and that's whatever type the foo(Int,Int) override returns, which is Int.
Now, consider the scenario in which we add the following function:
// A fifth overload differing from foo(Int,Int)->Int ONLY in return type.
// This is what makes `let bar1 = foo(1,2)` ambiguous: the compiler must now
// pick an overload and infer bar1's type simultaneously.
func foo(a: Int, b: Int) -> Float {
return 5.0
}
Now, scenario 1 becomes ambiguous:
let bar1 = foo(1,2)
We're asking Swift to determine TWO things implicitly here:
Which override of foo to use
What type to use for bar1
There is more than one way to satisfy the scenario. Either bar1 is an Int and we use the foo(Int,Int)->Int override, or bar1 is a Float, and we use the foo(Int,Int)->Float override. The compiler can't decide.
We can make the situation less ambiguous though as such:
let bar1: Int = foo(1,2)
In which case the compiler knows we want the foo(Int,Int)->Int override--it's the only one that satisfies the scenario. Or we can do:
let bar2: Float = foo(1,2)
In which case the compiler knows we want the foo(Int,Int)->Float override--again, the only way to satisfy the scenario.
But let's take a look back at my scenario 2, which is exactly the problem scenario you have:
let bar2: Float = foo(1,2)
There's not a foo(Int,Int)->Float override (forget everything about scenario 1 in which we added this override). However, the type IntegerLiteralConvertible can be implicitly cast to different types of numeric data types (only literal integers... not integer variables). So the compiler will try to find a foo override that takes arguments that IntegerLiteralConvertible can be cast to and returns a Float, which we've explicitly marked as bar2's type.
Well, IntegerLiteralConvertible can be cast as a Float, so the compiler finds three functions that take some combination of the right argument:
foo(Int,Float) -> Float
foo(Float,Int) -> Float
foo(Float,Float) -> Float
And the compiler doesn't know which to use. How can it? Why should it prioritize casting one or the other of your literal integers to a float?
So we get the ambiguity problem.
We can give the compiler another override. It's the same one we gave it in scenario 2: foo(Int,Int) -> Float, and now the compiler is okay with the following:
let bar2: Float = foo(1,2)
Because in this case, without implicitly casting the IntegerLiteralConvertibles, the compiler was able to find a function that matched: foo(Int, Int) -> Float.
So now you're thinking:
All right, let me just add this foo(Int,Int)->Float and everything will work!
Right?
Well, sorry to disappoint, but given the following two functions:
foo(Int,Int) -> Int
foo(Int,Int) -> Float
We will still run into ambiguity problems:
let bar3 = foo(1,2)
There are two overrides for foo that take an Int (or IntegerLiteralConvertible), and because we haven't specified bar3's type, we're asking the compiler to both figure out the appropriate override and implicitly determine bar3's type, which it cannot do.

How can an operator be overloaded for different RHS types and return values?

Given the following struct:
// The question's vector type (pre-1.0 Rust): three f32 components.
struct Vector3D {
x: f32,
y: f32,
z: f32
}
I want to overload its * operator to do a dot product when the right hand side is a Vector3D, and to do an element-wise multiplication when the RHS is a f32. My code looks like this:
// Multiplication with scalar
// Pre-1.0 syntax: Mul<RHS, Output> took the output as a second type
// parameter. To the coherence checker both blocks below are implementations
// of the same `Mul` trait for Vector3D, producing the "conflicting
// implementations" error quoted underneath.
impl Mul<f32, Vector3D> for Vector3D {
fn mul(&self, f: &f32) -> Vector3D {
Vector3D {x: self.x * *f, y: self.y * *f, z: self.z * *f}
}
}
// Multiplication with vector, aka dot product
impl Mul<Vector3D, f32> for Vector3D {
fn mul(&self, other: &Vector3D) -> f32 {
self.x * other.x + self.y * other.y + self.z * other.z
}
}
The compiler says for the first impl block:
Vector3D.rs:40:1: 44:2 error: conflicting implementations for trait `std::ops::Mul`
Vector3D.rs:40 impl Mul<f32, Vector3D> for Vector3D {
...
Vector3D.rs:53:1: 57:2 note: note conflicting implementation here
Vector3D.rs:53 impl Mul<Vector3D, f32> for Vector3D {
...
and vice versa for the other implementation.
As of Rust 1.0, you can now implement this:
use std::ops::Mul;
// Copy + Clone let the by-value Mul impls use a vector without consuming the
// caller's binding; PartialEq/Debug support comparison and println!("{:?}").
#[derive(Copy, Clone, PartialEq, Debug)]
struct Vector3D {
x: f32,
y: f32,
z: f32,
}
// Multiplication with scalar
// Scalar multiplication: scale every component of the vector by `f`.
// The result type is declared via the associated `Output` type, which is
// what lets a second, differently-typed Mul impl coexist below.
impl Mul<f32> for Vector3D {
    type Output = Vector3D;

    fn mul(self, f: f32) -> Vector3D {
        Vector3D {
            x: f * self.x,
            y: f * self.y,
            z: f * self.z,
        }
    }
}
// Multiplication with vector, aka dot product
// Dot product: multiply matching components, then sum them left to right.
impl Mul<Vector3D> for Vector3D {
    type Output = f32;

    fn mul(self, other: Vector3D) -> f32 {
        let terms = [self.x * other.x, self.y * other.y, self.z * other.z];
        terms.iter().sum()
    }
}
fn main() {
// `a` is Copy, so using it in both multiplications below does not move it.
let a = Vector3D {
x: 1.0,
y: 2.0,
z: 3.0,
};
let b = a * -1.0; // Mul<f32>: element-wise scaling -> Vector3D
let c = a * b; // Mul<Vector3D>: dot product -> f32
println!("{:?}", a);
println!("{:?}", b);
println!("{:?}", c);
}
The big change that allows this is the introduction of associated types, which shows up as the type Output = bit in each implementation. Another notable change is that the operator traits now take arguments by value, consuming them, so I went ahead and implemented Copy for the struct.
At the moment only a single impl is allowed per trait-type pair.
This situation will be improved with RFC 48, but it's not the full story (it's not really any of the story). The relevant section is Coherence, and it certainly doesn't mention the operator overloading case specifically, and essentially says it is still illegal:
The following example is NOT OK:
trait Iterator<E> { ... }
impl Iterator<char> for ~str { ... }
impl Iterator<u8> for ~str { ... }
Niko Matsakis (author of that RFC & Rust-type-system expert) has been thinking about these overloading traits specifically: he is the one who published ("What if I want overloading?") the trick below, but he has expressed his distaste towards it, mentioning that he'd like to allow implementations as you have written...
... which is where his RFC 135 comes in. The situation is described in detail in "multidispatch traits".
You can work-around it for now using secondary traits. The extra layer of traits allows you to write just one impl Mul<...> for Vector3D but comes at the cost of requiring a new trait for each type for which you wish to have multiple implementations of Mul.
// Pre-1.0 Rust workaround code; #[deriving(Show)] is the old spelling of
// today's #[derive(Debug)].
#[deriving(Show)]
struct Vector3D {
x: f32,
y: f32,
z: f32
}
// Secondary trait: each supported RHS type implements this, and the single
// blanket Mul impl below forwards to it. `Res` is the multiplication's
// result type (Vector3D for scalars, f32 for the dot product).
trait MulVec3D<Res> {
fn do_mul(&self, v: &Vector3D) -> Res;
}
// Multiplication with scalar
// Note the inversion: here `self` is the f32 scalar and `v` is the vector.
impl MulVec3D<Vector3D> for f32 {
fn do_mul(&self, v: &Vector3D) -> Vector3D {
Vector3D {x: v.x * *self, y: v.y * *self, z: v.z * *self}
}
}
// Multiplication with vector, aka dot product
impl MulVec3D<f32> for Vector3D {
fn do_mul(&self, v: &Vector3D) -> f32 {
self.x * v.x + self.y * v.y + self.z * v.z
}
}
// Blanket impl (pre-1.0 Mul<RHS, Res> form): Vector3D * RHS delegates to the
// RHS's MulVec3D implementation, so this single Mul impl serves every
// supported right-hand-side type without violating coherence.
impl<Res, RHS: MulVec3D<Res>> Mul<RHS, Res> for Vector3D {
fn mul(&self, rhs: &RHS) -> Res {
rhs.do_mul(self)
}
}
// Demo: scalar multiply and dot product both go through the one Mul impl.
fn main() {
let a = Vector3D { x: 1.0, y: 2.0, z: 3.0 };
let b = Vector3D { x: -3.0, y: 2.0, z: -1.0 };
println!("{}, {}", a * 2f32, a * b); // Vector3D { x: 2, y: 4, z: 6 }, -2
}