'Float' is not identical to 'UInt8' Swift

I have no idea what's wrong with my code for a Taylor series:
func factorial(n: Int) -> Int {
    return n == 0 ? 1 : n * factorial(n - 1)
}
func sin(num: Float) -> Float {
    let rad : Float = num * 1.0 / 180.0 * 3.1415926535897;
    var sum : Float = rad;
    for i in 1...100 {
        if (i % 2 == 0) {
            sum += Float(pow(rad, 2 * i + 1) / Float(factorial(2 * i + 1)));
        } else {
            sum -= Float(pow(rad, 2 * i + 1)) / Float(factorial(2 * i + 1));
        }
    }
    return sum;
}
print(sin(123.0));
Here are the errors:
<stdin>:11:17: error: cannot invoke '/' with an argument list of type '(#lvalue Float, $T25)'
sum += Float(pow(rad, 2 * i + 1) / Float(factorial(2 * i + 1)));
~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
<stdin>:13:13: error: 'Float' is not identical to 'UInt8'
sum -= Float(pow(rad, 2 * i + 1)) / Float(factorial(2 * i + 1));
^

The pow function needs two arguments of the same type, either Float or Double, and so does the division.
Change your sum += / sum -= statements to:
if (i % 2 == 0) {
    sum += pow(rad, Float(2 * i + 1)) / Float(factorial(2 * i + 1))
} else {
    sum -= pow(rad, Float(2 * i + 1)) / Float(factorial(2 * i + 1))
}

Do you need to deal with both Ints and Floats? Swift is really picky about types, so if you can stick to one, that will make your life way easier. With just Floats, you can get this working by adding a single line:
func factorial(n: Float) -> Float {
    return n == 0 ? 1 : n * factorial(n - 1)
}
func sin(num: Float) -> Float {
    let rad : Float = num * 1.0 / 180.0 * 3.1415926535897;
    var sum : Float = rad;
    for i in 1...100 {
        let float_index: Float = Float(i)
        if (i % 2 == 0) {
            sum += Float(pow(rad, 2 * float_index + 1) / Float(factorial(2 * float_index + 1)));
        } else {
            sum -= Float(pow(rad, 2 * float_index + 1)) / Float(factorial(2 * float_index + 1));
        }
    }
    return sum;
}

I think pow requires its arguments to be of the same type, but what you have isn't going to work even if you sort that out, because your factorial function generates numbers that are far too big.
The largest 64-bit integer has about twenty digits, while the factorial of 200 has about 375.
So it won't fit into an Int, and it won't even fit into a Double (a double-precision IEEE 754 number), which maxes out at roughly 308 digits of range (and far less precision).
You'd need to move up to Float80, which provides several thousand digits of range.
But, again, because of the limited precision, you're likely to introduce a lot of error into the calculation. That would be true even with IEEE 754 quadruple precision, which is limited to about 34 decimal digits of precision.
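One way to avoid the huge factorials altogether (a sketch of a standard trick, not something from either answer above) is to build each term of the series from the previous one: each new term is the old term times -x^2 / ((2k)(2k + 1)), so no factorial is ever formed and nothing overflows. Roughly:
// A sketch of the term-recurrence trick: each new term of the sine series
// is the previous term times -x*x / ((2k) * (2k + 1)), so no factorial
// (and no overflow) is ever needed. Double and 15 terms are arbitrary
// choices for illustration.
func sinTaylor(_ degrees: Double) -> Double {
    let x = degrees * Double.pi / 180
    var term = x        // first term of the series: x
    var sum = x
    for k in 1...15 {   // 15 terms is plenty for arguments of this size
        term *= -x * x / Double((2 * k) * (2 * k + 1))
        sum += term
    }
    return sum
}
print(sinTaylor(123.0)) // ≈ 0.8387, matching sin of 123 degrees
With 15 terms the truncation error is already far below double-precision rounding for arguments of this size.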

Related

How to modify this code to return Geopoint

I would like this code to return a newly constructed geopoint.
I need this,
GeoPoint prjTest=new GeoPoint(vxi+x,vyi+y);
to go somewhere so that prjTest is returned. I'm new to programming and I don't know the syntax well. I tried many things, and I could keep guessing for a long time. Please help. Thanks.
public class ProjectileTest
{
    public ProjectileTest(float vi, float angle) /** renamed velocity -> vi */
    {
        //Starting location
        double xi = 0, yi = 100;
        final double gravity = -9.81;
        //timeSlice declares the interval before checking the new location.
        double timeSlice = 0.001; /** renamed time -> timeSlice */
        double totalTime = 0; /** renamed recordedTime -> totalTime */
        double vxi = vi * Math.cos(Math.toRadians(angle)); /** renamed xSpeed -> vxi */
        double vyi = vi * Math.sin(Math.toRadians(angle)); /** renamed ySpeed -> vyi */
        //This (secondsTillImpact) seems to give a very accurate time whenever the angle is positive.
        double secondsTillImpact = Math.sqrt(2 * yi / -(gravity));
        /** Not sure I agree. Does this formula take into account the upward motion
         *  of the projectile along its parabolic arc? My suspicion is that this
         *  formula only "works" when the initial theta is: 180 <= angle <= 360.
         *
         *  Compare with the result predicted by quadratic(). Discarding the zero
         *  intercept which can't work for us (i.e. the negative one, because time
         *  only moves forward) leaves us with an expected result very close to the
         *  observed result.
         */
        double y; /** Current position along the y-axis */
        double x;
        do {
            // x = x + (xSpeed * time);
            x = vxi * totalTime; /** Current position along the x-axis */
            // y = y + (ySpeed * time);
            y = yi + vyi * totalTime + .5 * gravity * (totalTime * totalTime);
            // ySpeed = ySpeed + (gravity * time);
            double vy = vyi + gravity * totalTime; /** Current velocity of vector y-component */
            System.out.println("X: " + round2(x) + " Y: " + round2(y) + " YSpeed: " + round2(vy));
            totalTime += timeSlice;
        }
        while (y > 0);
        ////////////////////////////++++++++ GeoPoint prjTest=new GeoPoint(vxi+x,vyi+y);
        System.out.println("Incorrectly expected seconds: " + secondsTillImpact + "\nResult seconds: " + totalTime);
        quadratic((.5 * gravity), vyi, yi);
    }
    public double round2(double n) {
        return (int) (n * 100.0 + 0.5) / 100.0;
    }
    public void quadratic(double a, double b, double c) {
        if (b * b - 4 * a * c < 0) {
            System.out.println("No roots in R.");
        } else {
            double dRoot = Math.sqrt(b * b - 4 * a * c); /** root the discriminant */
            double x1 = (-b + dRoot) / (2 * a); /** x-intercept 1 */
            double x2 = (-b - dRoot) / (2 * a); /** x-intercept 2 */
            System.out.println("x-int one: " + x1 + " x-int two: " + x2);
        }
    }
}

Splitting method algorithm

x^3 - 2x^2 - 5 is my equation. To start, I have two values, x = 2 and x = 4. Both must be plugged into the equation first, and their results must be one negative and one positive each time. The second step is (2 + 4) / 2 = 3, so this time x = 3 in the equation. The process then continues with the last positive and the last negative value. I tried this:
var x = 2.0
var equation = pow(x, 3) - 2 * pow(x, 2) - 5
switch x {
case x : 2
    equation = pow(x, 3) - 2 * pow(x, 2) - 5
case x : 4
    equation = pow(x, 3) - 2 * pow(x, 2) - 5
default:
    0
}
print(equation)
How can I assign the first two values, 2 and 4, to one var x?
Apparently you want to implement the bisection method to find the (real) solution (“root”) of an equation. The first step is to define that equation as a function, so that it can be evaluated at various points:
func f(_ x: Double) -> Double {
    return pow(x, 3) - 2 * pow(x, 2) - 5
}
Then you need two variables for the left and right boundary of the current interval. These must be chosen such that f(x) has opposite signs at the boundaries. In your example:
var xleft = 2.0 // f(xleft) < 0
var xright = 4.0 // f(xright) > 0
Now you can start the iteration: compute f(x) at the midpoint of the current interval, and replace xleft or xright, depending on whether f(x) is negative or positive. Continue until the approximation is good enough for your purposes:
let eps = 0.0000001 // Desired precision
let leftSign = f(xleft).sign
repeat {
    let x = (xleft + xright)/2.0
    let y = f(x)
    if y == 0 {
        xleft = x
        break
    } else if y.sign == leftSign {
        xleft = x
    } else {
        xright = x
    }
    // print(xleft, xright)
} while xright - xleft > eps
// Print approximate solution:
print(xleft)
The next step would be to implement the bisection method itself as a function:
func bisect(_ f: ((Double) -> Double), xleft: Double, xright: Double, eps: Double = 1.0e-6) -> Double {
    let yleft = f(xleft)
    let yright = f(xright)
    precondition(yleft * yright <= 0, "f must have opposite sign at the boundaries")
    var xleft = xleft
    var xright = xright
    repeat {
        let x = (xleft + xright)/2.0
        let y = f(x)
        if y == 0 {
            return x
        } else if y.sign == yleft.sign {
            xleft = x
        } else {
            xright = x
        }
    } while xright - xleft > eps
    return (xleft + xright)/2.0
}
so that it can be used with arbitrary equations:
let sol1 = bisect({ x in pow(x, 3) - 2 * pow(x, 2) - 5 }, xleft: 2.0, xright: 4.0)
print(sol1) // 2.690647602081299
let sol2 = bisect({ x in cos(x/2)}, xleft: 3.0, xright: 4.0, eps: 1.0e-15)
print(sol2) // 3.1415926535897936
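As a quick sanity check (not part of the original answer), you can plug the returned approximations back into the corresponding functions; both residuals should be very close to zero:
print(pow(sol1, 3) - 2 * pow(sol1, 2) - 5) // near 0; sol1 is only accurate to about 1.0e-6
print(cos(sol2/2))                         // near 0; sol2 was computed with eps: 1.0e-15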

Swift CORDIC Algorithm gives constant answer

I tried to translate the MATLAB code from the CORDIC Wikipedia page.
However when I type those:
print(cordic(beta: Double.pi/9, n: 20))
print(cordic(beta: Double.pi/8, n: 20))
I get
[-0.17163433840184755, 0.98516072489744066]
[-0.17163433840184755, 0.98516072489744066]
It's always giving me a constant answer. Why? I'm sure that the "angle" and "Kvalues" arrays are properly calculated.
Here's the code:
import Foundation
var angles: [Double] = []
for i: Double in stride(from: 0, to: 27, by: 1) {
    angles.append(atan(pow(2, -i)))
}
var Kvalues: [Double] = []
for i: Double in stride(from: 0, to: 23, by: 1) {
    Kvalues.append(1/sqrt(abs(Double(1) + pow(2, -2 * i))))
    if i > 0 {
        Kvalues[Kvalues.count - 1] *= Kvalues[Kvalues.count - 2]
    }
}
func min(_ a: Int, _ b: Int) -> Int {
    return a > b ? b : a
}
func cordic(beta: Double, n: Int) -> [Double] {
    var beta1 = beta
    let Kn = Kvalues[min(n, Kvalues.count - 1)]
    var v: [Double] = [1, 0]
    var poweroftwo: Double = 1
    var angle = angles[0]
    for j in 0 ..< n {
        let sigma: Double = beta < 0 ? -1 : 1
        let factor: Double = sigma * poweroftwo
        v = [v[0] - v[1] * factor, v[1] + v[0] * factor]
        beta1 -= sigma * angle
        poweroftwo /= 2
        angle = j + 2 > angles.count ? angle / 2 : angles[j + 2]
    }
    return [v[0] * Kn, v[1] * Kn]
}
print(cordic(beta: Double.pi/9, n: 20))
print(cordic(beta: Double.pi/8, n: 20))
print(cordic(beta: Double.pi/9, n: 20))
print(cordic(beta: Double.pi/8, n: 20))
You get the same result for different input because in
let sigma: Double = beta < 0 ? -1 : 1
beta should be beta1, which is the local variable that is
updated in the loop.
But even after fixing that the results are not correct, and that is
caused by two "off-by-one" index errors. The arrays in the algorithm
description are 1-based and Swift arrays are 0-based. So
let Kn = Kvalues[min(n, Kvalues.count - 1)]
// should be
let Kn = Kvalues[min(n-1, Kvalues.count - 1)]
and
angle = j + 2 > angles.count ? angle / 2 : angles[j + 2]
// should be
angle = j + 1 >= angles.count ? angle / 2 : angles[j + 1]
The angles and Kvalues arrays should be defined for i from 0 up to and including 27 and 23, respectively.
Finally, there is no need to define your own min function as there is one in the Swift standard library.
Putting it all together your code would be:
import Foundation

var angles: [Double] = []
for i: Double in stride(from: 0, through: 27, by: 1) {
    angles.append(atan(pow(2, -i)))
}
var Kvalues: [Double] = []
for i: Double in stride(from: 0, through: 23, by: 1) {
    Kvalues.append(1/sqrt(abs(Double(1) + pow(2, -2 * i))))
    if i > 0 {
        Kvalues[Kvalues.count - 1] *= Kvalues[Kvalues.count - 2]
    }
}
func cordic(beta: Double, n: Int) -> [Double] {
    var beta1 = beta
    let Kn = Kvalues[min(n - 1, Kvalues.count - 1)]
    var v: [Double] = [1, 0]
    var poweroftwo: Double = 1
    var angle = angles[0]
    for j in 0 ..< n {
        let sigma: Double = beta1 < 0 ? -1 : 1
        let factor: Double = sigma * poweroftwo
        v = [v[0] - v[1] * factor, v[1] + v[0] * factor]
        beta1 -= sigma * angle
        poweroftwo /= 2
        angle = j + 1 >= angles.count ? angle / 2 : angles[j + 1]
    }
    return [v[0] * Kn, v[1] * Kn]
}
And that produces good approximations:
print(cordic(beta: Double.pi/9, n: 20)) // [0.93969210812600046, 0.34202155184390554]
print(cordic(beta: Double.pi/8, n: 20)) // [0.92388022188807306, 0.38268176805806309]
The exact values are
print(cos(Double.pi/9), sin(Double.pi/9)) // 0.939692620785908 0.342020143325669
print(cos(Double.pi/8), sin(Double.pi/8)) // 0.923879532511287 0.38268343236509

How can I implement NORMDIST function in objective c?

I am trying to implement a NORMDIST feature in my iPhone application, but I am not sure what library to import or how I would go about doing this.
If someone can point me in a direction, that would be awesome.
Not sure if this is precisely what you're looking for, but here is an algorithm for calculating a cumulative normal distribution approximation. There is an implementation in C++ that should be fairly trivial to port to Obj-C.
Try this:
static double normdist (double x, double mean, double standard_dev) {
    double res;
    x = (x - mean) / standard_dev;
    if (x == 0) {
        res = 0.5;
    } else {
        double oor2pi = 1 / (sqrt(2.00000 * 3.14159265358979323846));
        double t = 1 / (1.0000000 + 0.2316419 * fabs(x));
        t *= oor2pi * exp(-0.5 * x * x)
             * (0.31938153 + t
             * (-0.356563782 + t
             * (1.781477937 + t
             * (-1.821255978 + t * 1.330274429))));
        if (x >= 0)
            res = 1.00000 - t;
        else
            res = t;
    }
    return res;
}
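If you also need this from Swift, the same polynomial approximation ports over directly. This is only a sketch of such a port, with my own normdist signature and default parameters (the approximation itself is accurate to roughly seven decimal places, like the C version):
import Foundation

// Cumulative normal distribution using the same polynomial approximation
// as the C version above. The normdist name and default parameters are mine.
func normdist(_ x: Double, mean: Double = 0, standardDev: Double = 1) -> Double {
    let z = (x - mean) / standardDev
    if z == 0 { return 0.5 }
    let t = 1 / (1 + 0.2316419 * abs(z))
    let poly = t * (0.31938153
               + t * (-0.356563782
               + t * (1.781477937
               + t * (-1.821255978
               + t * 1.330274429))))
    let tail = exp(-0.5 * z * z) / sqrt(2 * Double.pi) * poly
    return z >= 0 ? 1 - tail : tail
}

print(normdist(1.0))                              // ≈ 0.8413
print(normdist(105, mean: 100, standardDev: 15))  // ≈ 0.6306, roughly NORMDIST(105, 100, 15, TRUE)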

How to determine Y Axis values on a chart

I'm working on a charting algorithm that will give me an array of y-axis values to use on my graph.
The main problem is that I also want to calculate the number of steps to use, and to use nice numbers for them. It must be able to take integers and doubles, and handle both small ranges (under 1) and large ranges (over 10000, etc.).
For example, if I were given a range of 0.1 - 0.9, ideally I would have values of 0, 0.2, 0.4, 0.6, 0.8, 1, but if I were given 0.3 to 0.7 I might use 0.3, 0.4, 0.5, 0.6, 0.7.
This is what I have so far. It works well with small ranges, but terribly with large ranges, and it doesn't give me nice numbers:
- (double *)yAxisValues:(double)min :(double)max {
    double diff = max - min;
    double divisor = 1.0;
    if (diff > 1) {
        while (diff > 1) {
            diff /= 10;
            divisor *= 10;
        }
    } else {
        while (diff < 1) {
            diff *= 10;
            divisor *= 10;
        }
    }
    double newMin = round(min * divisor) / divisor;
    double newMax = round(max * divisor) / divisor;
    if (newMin > min) {
        newMin -= 1.0/divisor;
    }
    if (newMax < max) {
        newMax += 1.0/divisor;
    }
    int test2 = round((newMax - newMin) * divisor);
    if (test2 >= 7) {
        while (test2 % 6 != 0 && test2 % 5 != 0 && test2 % 4 != 0 && test2 % 3 != 0) {
            test2++;
            newMax += 1.0/divisor;
        }
    }
    if (test2 % 6 == 0) {
        test2 = 6;
    } else if (test2 % 5 == 0) {
        test2 = 5;
    } else if (test2 % 4 == 0 || test2 == 2) {
        test2 = 4;
    } else if (test2 % 3 == 0) {
        test2 = 3;
    }
    double *values = malloc(sizeof(double) * (test2 + 1));
    for (int i = 0; i < test2 + 1; i++) {
        values[i] = newMin + (newMax - newMin) * i / test2;
    }
    return values;
}
Any suggestions?
Here's a snippet of code that does something similar, though it takes a slightly different approach. The "units" refer to whatever you are plotting on the graph. So if your scale is such that one unit on your graph should be 20 pixels on screen, this function returns how many units each step should be. With that information you can then easily calculate what the axis values are and where to draw them.
- (float)unitsPerMajorGridLine:(float)pixelsPerUnit {
    float amountAtMinimum, orderOfMagnitude, fraction;
    amountAtMinimum = [[self minimumPixelsPerMajorGridLine] floatValue] / pixelsPerUnit;
    orderOfMagnitude = floor(log10(amountAtMinimum));
    fraction = amountAtMinimum / pow(10.0, orderOfMagnitude);
    if (fraction <= 2) {
        return 2 * pow(10.0, orderOfMagnitude);
    } else if (fraction <= 5) {
        return 5 * pow(10.0, orderOfMagnitude);
    } else {
        return 10 * pow(10.0, orderOfMagnitude);
    }
}
A simple adaptation for JavaScript (thanks a lot to Johan Kool for the source):
const step = (() => {
    let pixelPerUnit = height / (end - start)
      , amountAtMinimum = minimumPixelsPerMajorGridLine / pixelPerUnit
      , orderOfMagnitude = Math.floor(Math.log10(amountAtMinimum))
      , fraction = amountAtMinimum / Math.pow(10.0, orderOfMagnitude);
    let result;
    if (fraction <= 2) {
        result = 2 * Math.pow(10.0, orderOfMagnitude);
    } else if (fraction <= 5) {
        result = 5 * Math.pow(10.0, orderOfMagnitude);
    } else {
        result = 10 * Math.pow(10.0, orderOfMagnitude);
    }
    return result; // return the computed step from the IIFE
})();

let arr = [];
arr.push(start);
let curVal = start - start % step + step
  , pxRatio = height / (end - start);
while (curVal < end) {
    arr.push(curVal);
    curVal += step;
}
arr.push(end);
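If you would rather do the whole thing in Swift, the same 1-2-5 snapping idea can drive both the step size and the tick values. This is only a rough sketch (the niceAxisValues name, the maxTicks parameter, and the outward range expansion are my own choices, not from the answer above):
import Foundation

// Rough sketch: pick a "nice" step (1, 2, 5, or 10 times a power of ten)
// based on a target tick count, then expand the range to multiples of it.
func niceAxisValues(min: Double, max: Double, maxTicks: Int = 6) -> [Double] {
    precondition(max > min && maxTicks > 0)
    let rawStep = (max - min) / Double(maxTicks)
    let magnitude = pow(10.0, floor(log10(rawStep)))
    let fraction = rawStep / magnitude

    // Snap the raw step up to the next value in the 1-2-5-10 sequence.
    let niceFraction: Double
    if fraction <= 1 {
        niceFraction = 1
    } else if fraction <= 2 {
        niceFraction = 2
    } else if fraction <= 5 {
        niceFraction = 5
    } else {
        niceFraction = 10
    }
    let step = niceFraction * magnitude

    // Round the boundaries outwards to multiples of the step and fill in ticks.
    let niceMin = floor(min / step) * step
    let niceMax = ceil(max / step) * step
    return Array(stride(from: niceMin, through: niceMax + step / 2, by: step))
}

print(niceAxisValues(min: 0.1, max: 0.9)) // 0.0, 0.2, 0.4, 0.6, 0.8, 1.0 (plus floating-point noise)
print(niceAxisValues(min: 13, max: 78))   // 0.0, 20.0, 40.0, 60.0, 80.0
Rounding the boundaries outwards is what turns the 0.1 - 0.9 range into 0 through 1, which is the behaviour asked for in the question.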