I have an app that aims to record the camera's current X, Y, Z coordinates and print them out. It does this properly with the code below:
func renderer(_ renderer: SCNSceneRenderer, willRenderScene scene: SCNScene, atTime time: TimeInterval) {
    guard let pointOfView = sceneView.pointOfView else { return }
    let transform = pointOfView.transform
    let CamPosition = SCNVector3(transform.m41, transform.m42, transform.m43)
    print(CamPosition)
}
I want to truncate the printed output since it's very long. I found this extension to truncate the values.
extension Double {
    func truncate(places: Int) -> Double {
        return Double(floor(pow(10.0, Double(places)) * self) / pow(10.0, Double(places)))
    }
}
This works if I print something like this:
let x = 1.123456789
print(x.truncate(places: 2))
but will not work if I print it out like this:
print(camRotation.truncate(places: 2))
The error it gives me says "Value of type 'SCNVector4' has no member 'truncate'"
Is this a formatting issue on my end, or do SCNVectors just not allow you to use extensions?
"Value of type 'SCNVector4' has no member 'truncate'"
camRotation is a SCNVector4.
Your extension is on Double, not SCNVector4.
/// Double, not SCNVector4
extension Double {
    func truncate(places: Int) -> Double {
        return Double(floor(pow(10.0, Double(places)) * self) / pow(10.0, Double(places)))
    }
}
Extensions work on any type, but camRotation.truncate(places: 2) doesn't work because the types don't match.
Ok, now on to the actual answer. Currently, your code says
let CamPosition = SCNVector3(transform.m41, transform.m42, transform.m43)
Just apply truncate to each of the components:
let CamPosition = SCNVector3(
    transform.m41.truncate(places: 2),
    transform.m42.truncate(places: 2),
    transform.m43.truncate(places: 2)
)
print(CamPosition)
Because these components are Float values, you should also change your extension to work on Float:
extension Float {
    func truncate(places: Int) -> Float {
        return Float(floor(pow(10.0, Float(places)) * self) / pow(10.0, Float(places)))
    }
}
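If you truncate vectors in more than one place, you could also push the per-component work into extensions on the vector types themselves. This is just a sketch along the same lines, building on the Float extension above and assuming iOS, where the components are Float; truncated(places:) is a name chosen here, not a SceneKit API:
import SceneKit

extension SCNVector3 {
    func truncated(places: Int) -> SCNVector3 {
        return SCNVector3(x: x.truncate(places: places),
                          y: y.truncate(places: places),
                          z: z.truncate(places: places))
    }
}

extension SCNVector4 {
    func truncated(places: Int) -> SCNVector4 {
        return SCNVector4(x: x.truncate(places: places),
                          y: y.truncate(places: places),
                          z: z.truncate(places: places),
                          w: w.truncate(places: places))
    }
}

// e.g. inside the render delegate from the question:
// print(CamPosition.truncated(places: 2))
// print(camRotation.truncated(places: 2))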
I'm working on a project which converts the user's face to an emoji. I use Apple's ARKit for this purpose.
I need to get the most probable option. I wrote this code:
func renderer(for anchor: ARAnchor) {
    guard let faceAnchor = anchor as? ARFaceAnchor else {
        return
    }
    let shapes = faceAnchor.blendShapes
    let browDownLeft = shapes[.browDownLeft]!.doubleValue
    let browInnerUp = shapes[.browInnerUp]!.doubleValue
    let browOuterUpLeft = shapes[.browOuterUpLeft]!.doubleValue
    let leftBrowMax = max(browDownLeft, browInnerUp, browOuterUpLeft)
    switch leftBrowMax {
    case browDownLeft:
        userFace.leftBrow = .browDown
    case browInnerUp:
        userFace.leftBrow = .browInnerUp
    case browOuterUpLeft:
        userFace.leftBrow = .browOuterUp
    default:
        userFace.leftBrow = .any
    }
}
I need to duplicate the function's body six times (for brows, eyes, and mouth sides), so I want to write it in a more convenient way. Is there anything in Swift like numpy's argmax function? Also, I need to restrict which arguments are compared, because arguments for the mouth should not be compared with arguments for the brows.
You can use something like this:
func maxBlendShape(for blendShapes: [ARFaceAnchor.BlendShapeLocation], in shape: [ARFaceAnchor.BlendShapeLocation: NSNumber]) -> Double? {
    blendShapes
        .compactMap { shape[$0] }
        .map(\.doubleValue)
        .max()
}
Usage would then be something like this:
maxBlendShape(for: [.browDownLeft, .browInnerUp, .browOuterUpLeft], in: faceAnchor.blendShapes)
Note: Nothing here is specific to ARKit; you just filter some keys from the dictionary and find their max value. A generic solution could look like this:
extension Dictionary where Value == NSNumber {
    func maxDouble(for keys: [Key]) -> Double? {
        keys
            .compactMap { self[$0] }
            .map(\.doubleValue)
            .max()
    }
}
faceAnchor.blendShapes.maxDouble(for: [.browInnerUp, .browDownLeft, .browOuterUpLeft])
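Since the switch in the question ultimately needs to know which blend shape produced the maximum (an argmax), not just the maximum value, a similar extension could return the key instead. This is only a sketch along the same lines; maxKey(for:) is a name invented here, and userFace is the model from the question:
extension Dictionary where Value == NSNumber {
    func maxKey(for keys: [Key]) -> Key? {
        keys
            .compactMap { key in self[key].map { (key, $0.doubleValue) } }
            .max { $0.1 < $1.1 }?
            .0
    }
}

if let leftBrowMax = faceAnchor.blendShapes.maxKey(for: [.browDownLeft, .browInnerUp, .browOuterUpLeft]) {
    switch leftBrowMax {
    case .browDownLeft:
        userFace.leftBrow = .browDown
    case .browInnerUp:
        userFace.leftBrow = .browInnerUp
    case .browOuterUpLeft:
        userFace.leftBrow = .browOuterUp
    default:
        userFace.leftBrow = .any
    }
}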
func colorBall() {
    let colorize1 = SKAction.colorizeWithColor(UIColor.redColor(), colorBlendFactor: 1.0, duration: 0.1)
    let colorize2 = SKAction.colorizeWithColor(UIColor.greenColor(), colorBlendFactor: 1.0, duration: 0.1)
    let colorize3 = SKAction.colorizeWithColor(UIColor.blueColor(), colorBlendFactor: 1.0, duration: 0.1)
    let actions = [colorize1, colorize2, colorize3]
    let randomIndex = Int(arc4random_uniform(3))
    self.Ball.runAction(actions[randomIndex])
}

var colorBucket = [UIColor]()

func randomColor() -> UIColor {
    if colorBucket.isEmpty {
        fillBucket()
    }
    let randomIndex = Int(arc4random_uniform(UInt32(colorBucket.count)))
    let randomColor = colorBucket[randomIndex]
    colorBucket.removeAtIndex(randomIndex)
    return randomColor
}

func fillBucket() {
    colorBucket = [UIColor.redColor(), UIColor.greenColor(), UIColor.blueColor()]
}
When I run this code in my game, and print out the color value of my ball, it sometimes prints out numbers like this:
UIDeviceRGBColorSpace 1 2.98023e-08 2.98023e-08 1
Why does it do this? I just want it to say: UIDeviceRGBColorSpace 0 0 1 1 if it's blue, UIDeviceRGBColorSpace 1 0 0 1 if it's red, etc.
How can I keep those numbers from going higher than one, or much lower than one? What makes them do that in my code?
Based partially on zneak's answer I've made this (no thrills or frills) extension to UIColor which could come in handy:
extension UIColor {
    func isVisuallyEqual(color: UIColor) -> Bool {
        let compareValues = CGColorGetComponents(color.CGColor)
        let values = CGColorGetComponents(self.CGColor)
        let count = CGColorGetNumberOfComponents(self.CGColor)
        if count != CGColorGetNumberOfComponents(color.CGColor) {
            debugPrint("color-parameter has mismatching colorSpace")
            return false
        }
        for index in 0..<count {
            if !fuzzyFloatCompares(values[index], float2: compareValues[index]) {
                return false
            }
        }
        return true
    }

    private func fuzzyFloatCompares(float1: CGFloat, float2: CGFloat) -> Bool {
        let difference = float1 - float2
        return difference >= -1/256 && difference <= 1/256
    }
}
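Usage could look something like this, where Ball is assumed to be the SKSpriteNode from the question (Swift 2 era syntax to match the extension above):
if Ball.color.isVisuallyEqual(UIColor.blueColor()) {
    print("the ball is visually blue")
}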
2.98023e-08 is 0.0000000298023. If you look up the value 2.98023e-08 on Google or another search engine, you can find several examples of people getting that value because of rounding errors. Rounding errors occur because of how computers treat floating-point numbers.
It's probably a rounding error from the interpolation code that colorizeWithColor uses, and you get it instead of zero. For practical purposes, when talking about color components about to be displayed to an end user, I'd say that anything smaller than 1/256 can be considered to be zero.
You can test if two floating point numbers are "about equal" like this (typed on my phone, not really guaranteed to work):
func areAboutTheSame(a: Double, b: Double) -> Bool {
    let difference = a - b
    return difference < 1/256 && difference > -1/256
}
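For example, the stray component from the question compares as effectively zero (the calls below assume Swift 3+ label rules):
areAboutTheSame(a: 2.98023e-08, b: 0) // true, so treat it as 0 for display
areAboutTheSame(a: 1.0, b: 0)         // false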
I want to be able to take a longitude/latitude value of 19.3563443 and turn it into 19.35634 (five decimal places). Is there any easy way to do this?
You can use NumberFormatter:
extension Formatter {
    static let number = NumberFormatter()
}

extension FloatingPoint {
    func fractionDigitsRounded(to digits: Int, roundingMode: NumberFormatter.RoundingMode = .halfEven) -> String {
        Formatter.number.roundingMode = roundingMode
        Formatter.number.minimumFractionDigits = digits
        Formatter.number.maximumFractionDigits = digits
        return Formatter.number.string(for: self) ?? ""
    }
}
let value = 19.3563443
let roundedToFive = value.fractionDigitsRounded(to: 5) // "19.35634"
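Since the question asks about truncating, note that the roundingMode parameter the extension above already exposes can be set to .down, which for these positive values simply drops the extra digits instead of rounding to nearest:
let truncatedToFive = value.fractionDigitsRounded(to: 5, roundingMode: .down)  // "19.35634"
let contrast = 19.999999.fractionDigitsRounded(to: 5, roundingMode: .down)     // "19.99999" instead of "20.00000"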
Round, get double value back.
NSString(format: "%.5f", myfloat).doubleValue
This won't truncate, but may be accurate enough for long/lat to 5 decimal places and is quick.
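For instance, with the value from the question (the format string rounds the fifth decimal rather than truncating, which happens to give the same digits here):
NSString(format: "%.5f", 19.3563443).doubleValue // 19.35634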
This is a math problem, not a coding problem, and converting between String and Double is not very attractive. In mathematics, truncating digits to the right of the decimal is called reducing scale. To truncate a non-integer, all you need to do is move the decimal to the right by however many places you want to keep (multiply by 10 to move it one place, by 100 to move it two, etc.), convert the result into an integer (which drops everything after the new decimal point), and then divide by the original multiplier to move the decimal back to its original position.
To reduce pi's scale to 3:
move the decimal right: 3.1415927 * 10^3 = 3141.5927
zero the fraction = 3141.0
move the decimal back: 3141 / 10^3 = 3.141
Add it as an extension to Double for convenience:
extension Double {
    /// Reduces a double's scale (truncates digits to the right of the decimal).
    func reduceScale(to places: Int) -> Double {
        guard !self.isNaN, !self.isInfinite else {
            return 0 // converting NaN or infinity to Int would crash, so bail out early
        }
        let multiplier = pow(10, Double(places))
        let newDecimal = multiplier * self            // move the decimal right
        let truncated = Double(Int(newDecimal))       // drop the fraction
        let originalDecimal = truncated / multiplier  // move the decimal back
        return originalDecimal
    }
}
let dirty = 3.1415927
let clean = dirty.reduceScale(to: 2) // 3.14
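Because Int(_:) truncates toward zero, the same extension also behaves as expected for negative values:
let negative = (-3.1415927).reduceScale(to: 2) // -3.14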
I suggest using NSDecimalNumber. I don't know if this is actually better or worse than @Leonardo's answer, but when dealing with numbers I prefer to stick to numbers and not convert to/from strings. The code below generates a function for a requested number of digits that can be reused as needed.
This truncates by rounding down. To get normal arithmetic rounding, change it to use .RoundPlain instead of .RoundDown.
func makeRounder(#digits: Int16) -> (Double) -> Double {
    class RoundHandler : NSObject, NSDecimalNumberBehaviors {
        var roundToDigits: Int16
        init(roundToDigits: Int16) {
            self.roundToDigits = roundToDigits
        }
        @objc func scale() -> Int16 {
            println("Digits: \(roundToDigits)")
            return roundToDigits
        }
        @objc func roundingMode() -> NSRoundingMode {
            return NSRoundingMode.RoundDown
        }
        @objc func exceptionDuringOperation(operation: Selector, error: NSCalculationError, leftOperand: NSDecimalNumber, rightOperand: NSDecimalNumber) -> NSDecimalNumber? {
            return nil
        }
    }

    let roundHandler = RoundHandler(roundToDigits: digits)
    func roundDigits(original: Double) -> Double {
        let decimal = NSDecimalNumber(double: original)
        let rounded = decimal.decimalNumberByRoundingAccordingToBehavior(roundHandler)
        return rounded.doubleValue
    }
    println("Digits: \(digits)")
    return roundDigits
}
let roundTo5 = makeRounder(digits:Int16(5))
roundTo5(19.3563443)
roundTo5(1.23456789)
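In more recent Swift, you can get the same behavior without writing a custom NSDecimalNumberBehaviors class, because Foundation ships a ready-made behaviors object, NSDecimalNumberHandler. A rough equivalent of the function above (Swift 3+ naming; treat it as a sketch):
import Foundation

func makeRounder(digits: Int16) -> (Double) -> Double {
    // NSDecimalNumberHandler bundles the rounding mode, scale, and exception policy.
    let handler = NSDecimalNumberHandler(roundingMode: .down,
                                         scale: digits,
                                         raiseOnExactness: false,
                                         raiseOnOverflow: false,
                                         raiseOnUnderflow: false,
                                         raiseOnDivideByZero: false)
    return { value in
        NSDecimalNumber(value: value)
            .rounding(accordingToBehavior: handler)
            .doubleValue
    }
}

let truncateTo5 = makeRounder(digits: 5)
truncateTo5(19.3563443) // 19.35634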
I found a different answer on a different site and wanted to share it:
let x = 129.88888888777
let y = Double(round(1000*x)/1000)
y returns 129.889, thanks to Swift's round function. (Note that round rounds to the nearest value, so this rounds rather than truncates; use floor in place of round if you want 129.888.)
let x = (long*100000).rounded(.towardZero)/100000
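Assuming long holds the longitude value from the question, that works out as follows:
let long = 19.3563443
let x = (long * 100000).rounded(.towardZero) / 100000 // 19.35634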
I got error: '(Int, Int)' is not identical to 'CGPoint'
How do I convert an (Int, Int) to a CGPoint?
let zigzag = [(100,100),
              (100,150),(150,150),
              (150,200)]

override func drawRect(rect: CGRect)
{
    // Get the drawing context.
    let context = UIGraphicsGetCurrentContext()
    // Create the shape (a vertical line) in the context.
    CGContextBeginPath(context)
    //Error is here
    CGContextAddLines(context, zigzag, zigzag.count)
    // Configure the drawing environment.
    CGContextSetStrokeColorWithColor(context, UIColor.redColor().CGColor)
    // Request the system to draw.
    CGContextStrokePath(context)
}
CGContextAddLines() expects an array of CGPoint. If you already have an
array of (Int, Int) tuples then you can convert it with
let points = zigzag.map { CGPoint(x: $0.0, y: $0.1) }
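and then pass the converted array to the same call from the question:
CGContextAddLines(context, points, points.count)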
An alternate way to avoid the boilerplate code required to create instances of the same type is to make CGPoint implement ArrayLiteralConvertible, making it initializable from an array of CGFloat literals:
extension CGPoint : ArrayLiteralConvertible {
    public init(arrayLiteral elements: CGFloat...) {
        self.x = elements.count > 0 ? elements[0] : 0.0
        self.y = elements.count > 1 ? elements[1] : 0.0
    }
}
and then use it as follows:
let zigzag: [CGPoint] = [
    [100,100],
    [100,150],
    [150,150],
    [150,200]
]
A few notes:
stylistically, it doesn't look good - it would be good if literals could be used for tuples, but I am not aware of any way to do that
if an empty array is used, the CGPoint is initialized with x = 0 and y = 0
if an array with one element is used, it is initialized with y = 0
if more than 2 values are used, all the ones after the 2nd are ignored
If it tells you to use CGPoint, use it! A plain (number, number) is just a pair of Ints.
let zigzag = [CGPointMake(100,100),
CGPointMake(100,150),CGPointMake(150,150),
CGPointMake(150,200)]
Yet another:
func CGPoints(points: (x: CGFloat, y: CGFloat)...) -> [CGPoint] {
    return map(points) { CGPoint($0) }
}
let zigzag = CGPoints(
(100,100),(100,150),(150,150),(150,200)
)
So, I'm trying to develop a simple game written in Swift, but I'm having trouble doing a pretty simple thing. I can't manage to create a random CGPoint... When using arc4random, a compiler error shows up telling me that I can't use Int32 in a CGPoint. So, is there any way to do this? Any workaround? Thanks!
You can also make use of Swift's extensions of base types to create a reusable set of overloaded functions on CGPoint. Maybe something like:
extension CGPoint {
    func random() -> CGPoint {
        return CGPoint(x: Int(arc4random() % 1000), y: Int(arc4random() % 1000))
    }
    func random(range: Int) -> CGPoint {
        return CGPoint(x: Int(arc4random() % UInt32(range)), y: Int(arc4random() % UInt32(range)))
    }
    func random(rangeX: Int, rangeY: Int) -> CGPoint {
        return CGPoint(x: Int(arc4random() % UInt32(rangeX)), y: Int(arc4random() % UInt32(rangeY)))
    }
}
You can then write random CGPoints like this:
var p = CGPoint.random()
//random x and y with a range of 1000
or
var p = CGPoint.random(range:100)
//random x and y with a range of 100
or
var p = CGPoint.random(rangeX:200, rangeY:400)
//random x up to 200 and random y with a range of up to 400
Granted, I'm not in the Xcode IDE at the moment to check syntax / if it compiles correctly but hope that could be of help :-)
...
//////////////////
Swift 1.2 Update
//////////////////
Seems these type-level function calls are not allowed anymore with extensions... at least for CGPoint; probably because CGPoint is actually a struct and not a class, based on the current iOS documentation.
Here's a more in-depth version of my extension that allows for Range types.
This is confirmed working as of Xcode 6.4 beta.
(GitHub repository with Playground file found here:
https://github.com/princetrunks/Random-CGPoint-Extension)
// creates random CGPoints in Swift as of Xcode 6.4 beta (6E7)
extension CGPoint {
    /* private functions that help alleviate the ambiguity of the modulo bias
       and nested typecasting as well as recycle similar functionality
       for either Int or Range type parameter inputs */
    private func randomInt(num: Int) -> Int {
        return Int(arc4random_uniform(UInt32(num)))
    }
    private func randomIntFromRange(numRange: Range<Int>) -> Int {
        return Int(arc4random_uniform(UInt32(numRange.endIndex - numRange.startIndex))) + numRange.startIndex
    }

    //private variable for the default range
    private var defaultRange: Int {
        get { return 1000 }
    }

    //(a) public variable that creates a default random CGPoint
    static var randomPoint = CGPoint.zeroPoint.random()

    //(b) default random point creation
    func random() -> CGPoint {
        return CGPoint(x: randomInt(defaultRange), y: randomInt(defaultRange))
    }

    //(c) using an Int parameter for both the random x and y range
    func random(range: Int) -> CGPoint {
        return CGPoint(x: randomInt(range), y: randomInt(range))
    }

    //(d) allows for the specification of the x and y random range
    func random(#rangeX: Int, rangeY: Int) -> CGPoint {
        return CGPoint(x: randomInt(rangeX), y: randomInt(rangeY))
    }

    //(e) allows the same functionality as (c) but with a Range<Int> type parameter
    func random(range: Range<Int>) -> CGPoint {
        return CGPoint(x: randomIntFromRange(range), y: randomIntFromRange(range))
    }

    //(f) allows the same functionality as (d) but with a Range<Int> type parameter
    func random(#rangeX: Range<Int>, rangeY: Range<Int>) -> CGPoint {
        return CGPoint(x: randomIntFromRange(rangeX), y: randomIntFromRange(rangeY))
    }
}
Here's how we can test this extension:
//(a)
let r = CGPoint.randomPoint
//(b)
var anotherRandomPoint = r.random()
//(c)
anotherRandomPoint = r.random(1000)
//(d)
anotherRandomPoint = r.random(rangeX: 90, rangeY: 2000)
//(e)
anotherRandomPoint = r.random(0...1000)
//(f)
anotherRandomPoint = r.random(rangeX: 0...90, rangeY: 0...2000)

// generates 100 random CGPoints between -1000 and 999
for _ in 0...100 {
    anotherRandomPoint.random(-1000...1000)
}
Hi, what about constructing an Int with Int(arc4random())?
e.g.
var p = CGPoint(x:Int(arc4random()%1000),y:Int(arc4random()%1000))
Swift 4, 5
// Add some range (declare the bounds as CGFloat so they form a Range<CGFloat>)
let minX: CGFloat = 0
let maxX: CGFloat = 100
let minY: CGFloat = 0
let maxY: CGFloat = 100

let randomX = CGFloat.random(in: minX..<maxX)
let randomY = CGFloat.random(in: minY..<maxY)
let random = CGPoint(x: randomX, y: randomY)
Here is an extension on CGPoint to generate a random point based on your x and y closed ranges.
extension CGPoint {
    static func randPoint(xRange: ClosedRange<CGFloat>, yRange: ClosedRange<CGFloat>) -> Self {
        let x = CGFloat.random(in: xRange)
        let y = CGFloat.random(in: yRange)
        return .init(x: x, y: y)
    }
}
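Usage might look like this (ranges chosen arbitrarily for the example):
let point = CGPoint.randPoint(xRange: 0...100, yRange: 0...200)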