CRC-16 CCITT problem – incorrect calculation – Swift

Trying to implement this CRC16 CCITT checksum within my Bluetooth iOS mobile application:
extension Data {
// Transform applied to a 16-bit remainder before it is returned.
typealias bit_order_16 = (_ value: UInt16) -> UInt16
// Transform applied to each 8-bit input byte before it is folded in.
typealias bit_order_8 = (_ value: UInt8) -> UInt8
// Computes the CRC-16/CCITT-FALSE checksum of this Data.
// NOTE(review): the NSData bridge and pointer construction here are
// Swift-3-era; the answer below shows a withUnsafeBytes replacement.
func crc16Check() -> UInt16 {
let data = self as! NSData
let bytes = UnsafePointer<UInt8>(data.bytes.assumingMemoryBound(to: UInt8.self))
let length = data.length
return crc16ccitt(message: bytes, nBytes: length)
}
// Identity transform for 16-bit values (MSB-first variants).
func straight_16(value: UInt16) -> UInt16 {
return value
}
// Reverses the bit order of a 16-bit value (for reflected variants).
func reverse_16(value: UInt16) -> UInt16 {
var value = value
var reversed: UInt16 = 0
for i in stride(from: 0, to: 16, by: 1) {
reversed <<= 1
reversed |= (value & 0x1)
value >>= 1
}
return reversed
}
// Identity transform for 8-bit values.
func straight_8(value: UInt8) -> UInt8 {
return value
}
// Reverses the bit order of an 8-bit value.
func reverse_8(value: UInt8) -> UInt8 {
var value = value
var reversed: UInt8 = 0
for i in stride(from: 0, to: 8, by: 1) {
reversed <<= 1
reversed |= (value & 0x1)
value >>= 1
}
return reversed
}
// Generic bit-by-bit CRC-16 over `nBytes` of `message`.
func crc16(message: UnsafePointer<UInt8>, nBytes: Int, data_order: bit_order_8, remainder_order: bit_order_16, remainder: UInt16, polynomial: UInt16) -> UInt16 {
var remainder = remainder
for byte in stride(from: 0, to: nBytes, by: 1) {
// BUG (the subject of this question): the 8-bit value is shifted
// left by 8 *before* being widened to UInt16, so the shift always
// produces 0 and the data byte is never folded in. It must be
// `UInt16(data_order(message[byte])) << 8` — widen first, then shift.
remainder ^= UInt16(data_order(message[byte]) << 8)
var bit = 8
while bit > 0 {
if (remainder & 0x8000) != 0 {
// BUG: hard-codes 0x1021 instead of using the `polynomial`
// parameter, which silently breaks crc16ibm (poly 0x8005).
remainder = (remainder << 1) ^ 0x1021
} else {
remainder = (remainder << 1)
}
bit -= 1
}
}
return remainder_order(remainder)
}
// CRC-16/CCITT-FALSE: MSB-first, initial remainder 0xFFFF.
func crc16ccitt(message: UnsafePointer<UInt8>, nBytes: Int) -> UInt16 {
return crc16(message: message, nBytes: nBytes, data_order: straight_8, remainder_order: straight_16, remainder: 0xffff, polynomial: 0x1021)
}
// CRC-16/XMODEM: MSB-first, initial remainder 0x0000.
func crc16ccitt_xmodem(message: UnsafeMutablePointer<UInt8>, nBytes: Int) -> UInt16 {
return crc16(message: message, nBytes: nBytes, data_order: straight_8, remainder_order: straight_16, remainder: 0x0000, polynomial: 0x1021)
}
// CRC-16/KERMIT: fully reflected, with a final byte swap.
func crc16ccitt_kermit(message: UnsafeMutablePointer<UInt8>, nBytes: Int) -> UInt16 {
let swap = crc16(message: message, nBytes: nBytes, data_order: reverse_8, remainder_order: reverse_16, remainder: 0x0000, polynomial: 0x1021)
return swap << 8 | swap >> 8
}
// CRC-16/AUG-CCITT: MSB-first, initial remainder 0x1D0F.
func crc16ccitt_1d0f(message: UnsafeMutablePointer<UInt8>, nBytes: Int) -> UInt16 {
return crc16(message: message, nBytes: nBytes, data_order: straight_8, remainder_order: straight_16, remainder: 0x1d0f, polynomial: 0x1021)
}
// CRC-16/ARC (IBM): reflected, polynomial 0x8005 — affected by the
// hard-coded-polynomial bug flagged above.
func crc16ibm(message: UnsafeMutablePointer<UInt8>, nBytes: Int) -> UInt16 {
return crc16(message: message, nBytes: nBytes, data_order: reverse_8, remainder_order: reverse_16, remainder: 0x0000, polynomial: 0x8005)
}
}
I set up a fixed data type of
let tData = Data.init(bytes: [0x05, 0x02, 0x03] as [UInt8], count: 3)
let crcString = String.init(format: "CRC error, calculated: %04X", tData.crc16Check())
print(crcString)
//Prints out CC9C
CC9C is incorrect.
The answer should be: 716D
I can't seem to find the error within the CRC16 CCITT calculation. Can someone please help spot the issue? I'm really not sure where it's wrong and have been spending too long trying to figure it out. I would appreciate any kind of help from the community. Thank you.

The error is the wrong order of operations here:
remainder ^= UInt16(data_order(message[byte]) << 8)
The 8-bit value data_order(message[byte]) is shifted by 8 bits to the left – the result will always be zero. It should be
remainder ^= UInt16(data_order(message[byte])) << 8
so that the number is converted to a 16-bit value before shifting it to the left.
That problem might not occur in a similar C program, where all integral operands are promoted to int before doing the calculation – such implicit type conversions are not done in Swift.
Another error is that your func crc16() uses the fixed polynomial 0x1021 instead of the polynomial argument. That causes a wrong result for the crc16ibm checksum.
Note also that the conversion to NSData in crc16Check() is not needed. That method can be simplified to
// Simplified check: reads the bytes in place via withUnsafeBytes,
// avoiding the NSData bridge entirely.
// NOTE(review): this is the Swift-4 closure form where $0 is a typed
// pointer; on Swift 5+ withUnsafeBytes passes UnsafeRawBufferPointer,
// so this snippet would need updating there.
func crc16Check() -> UInt16 {
return self.withUnsafeBytes { [length = self.count] in
crc16ccitt(message: $0, nBytes: length)
}
}
Even better: Make all methods operator on self instead of passing Unsafe(Mutable)Pointers and lengths around:
extension Data {

    /// Transform applied to the 16-bit remainder before it is returned.
    typealias bit_order_16 = (_ value: UInt16) -> UInt16
    /// Transform applied to each 8-bit input byte before it is folded in.
    typealias bit_order_8 = (_ value: UInt8) -> UInt8

    /// Identity transform for 16-bit values (used by MSB-first variants).
    func straight_16(value: UInt16) -> UInt16 {
        return value
    }

    /// Returns `value` with its 16 bits in reverse order
    /// (used by the reflected CRC variants).
    func reverse_16(value: UInt16) -> UInt16 {
        var source = value
        var flipped: UInt16 = 0
        var remaining = 16
        while remaining > 0 {
            flipped = (flipped << 1) | (source & 0x1)
            source >>= 1
            remaining -= 1
        }
        return flipped
    }

    /// Identity transform for 8-bit values.
    func straight_8(value: UInt8) -> UInt8 {
        return value
    }

    /// Returns `value` with its 8 bits in reverse order.
    func reverse_8(value: UInt8) -> UInt8 {
        var source = value
        var flipped: UInt8 = 0
        var remaining = 8
        while remaining > 0 {
            flipped = (flipped << 1) | (source & 0x1)
            source >>= 1
            remaining -= 1
        }
        return flipped
    }

    /// Generic bit-by-bit CRC-16 over the bytes of `self`.
    ///
    /// - Parameters:
    ///   - data_order: applied to each input byte (identity or bit-reversal).
    ///   - remainder_order: applied to the final remainder.
    ///   - remainder: the initial remainder value.
    ///   - polynomial: the generator polynomial (e.g. 0x1021, 0x8005).
    func crc16(data_order: bit_order_8, remainder_order: bit_order_16, remainder: UInt16, polynomial: UInt16) -> UInt16 {
        var crc = remainder
        for inputByte in self {
            // Widen to UInt16 *before* shifting so the byte lands in the
            // high half of the remainder.
            crc ^= UInt16(data_order(inputByte)) << 8
            var bitsLeft = 8
            while bitsLeft > 0 {
                let topBitSet = (crc & 0x8000) != 0
                crc <<= 1
                if topBitSet {
                    crc ^= polynomial
                }
                bitsLeft -= 1
            }
        }
        return remainder_order(crc)
    }

    /// CRC-16/CCITT-FALSE: MSB-first, initial remainder 0xFFFF, poly 0x1021.
    func crc16ccitt() -> UInt16 {
        return crc16(data_order: straight_8, remainder_order: straight_16, remainder: 0xffff, polynomial: 0x1021)
    }

    /// CRC-16/XMODEM: MSB-first, initial remainder 0x0000, poly 0x1021.
    func crc16ccitt_xmodem() -> UInt16 {
        return crc16(data_order: straight_8, remainder_order: straight_16, remainder: 0x0000, polynomial: 0x1021)
    }

    /// CRC-16/KERMIT: fully reflected, followed by a final byte swap.
    func crc16ccitt_kermit() -> UInt16 {
        let reflected = crc16(data_order: reverse_8, remainder_order: reverse_16, remainder: 0x0000, polynomial: 0x1021)
        return reflected.byteSwapped
    }

    /// CRC-16/AUG-CCITT: MSB-first, initial remainder 0x1D0F, poly 0x1021.
    func crc16ccitt_1d0f() -> UInt16 {
        return crc16(data_order: straight_8, remainder_order: straight_16, remainder: 0x1d0f, polynomial: 0x1021)
    }

    /// CRC-16/ARC (IBM): fully reflected, polynomial 0x8005.
    func crc16ibm() -> UInt16 {
        return crc16(data_order: reverse_8, remainder_order: reverse_16, remainder: 0x0000, polynomial: 0x8005)
    }
}
Example usage (code updated for Swift 4+):
let tData = Data([0x05, 0x02, 0x03])
print(String(format: "crc16ccitt: %04X", tData.crc16ccitt())) // 716D
print(String(format: "crc16ccitt_xmodem: %04X", tData.crc16ccitt_xmodem())) // BDF1
print(String(format: "crc16ccitt_kermit: %04X", tData.crc16ccitt_kermit())) // 9638
print(String(format: "crc16ccitt_1d0f: %04X", tData.crc16ccitt_1d0f())) // ACFD
print(String(format: "crc16ibm: %04X", tData.crc16ibm())) // 6051
These numbers coincide with the results from this Online CRC calculator.

Related

withUnsafeBytes + Generic type behavior

I have a function that allows me to read a number (Integer, Double etc) from a binary file using generic types. For example if I expect a Int64, il will read 8 bytes...
// Reads `chunkSize` bytes from the underlying FileHandle and returns
// them as Data (may be shorter than requested at end-of-file).
// NOTE(review): force-unwraps self.handle — crashes if no file is open.
public func read(chunkSize: Int) -> Data {
return self.handle!.readData(ofLength: chunkSize)
}
// Reads MemoryLayout<I>.size bytes and reinterprets them as an `I`.
// BUG (explained in the answer below): in `$0.pointee` the pointee type
// is inferred from the declared return type `I?`, so Optional<I> —
// which occupies size(I) + 1 bytes — is read from a buffer holding only
// size(I) bytes. The stray extra byte randomly makes the result nil.
// Annotating the closure result with `as I` forces the right inference.
public func readNumber<I>() -> I? {
let data: Data = self.read(chunkSize: MemoryLayout<I>.size)
if data.count == 0 {
return nil
}
return data.withUnsafeBytes { $0.pointee }
}
The readNumber randomly returns nil for no reason. Not from the count check but from the last line.
However it perfectly works when I cast to I like so :
return data.withUnsafeBytes { $0.pointee } as I
Why is that ?
EDIT:
I reproduced this using Playgrounds :
// Playground reproduction of the intermittent nil: readNumber()'s
// declared result `T?` makes `$0.pointee` read size(T) + 1 bytes
// (Optional<T> occupies the extra byte), so the force-unwrap in
// example() fails at random depending on the out-of-bounds byte.
class Test {
// Stand-in for file I/O: returns `chunkSize` bytes, all 0x01.
public func read(chunkSize: Int) -> Data {
return Data(repeating: 1, count: chunkSize)
}
public func readNumber<T>() -> T? {
let data: Data = read(chunkSize: MemoryLayout<T>.size)
if data.count == 0 {
return nil
}
// Pointee is inferred as Optional<T> here — the bug under discussion.
return data.withUnsafeBytes { $0.pointee }
}
public func example() {
let value2: Double = readNumber()!
print(value2)
}
}
let test = Test()
for i in 0..<1000 {
test.example()
}
It seems I need to correct my comment a little. Even when Swift works consistently as programmed, the result may seem to change randomly when you have a memory issue such as accessing out of bounds.
First prepare a magical extension for UnsafePointer:
// Debug helper: behaves exactly like `pointee`, but first prints the
// type Swift inferred for `Pointee` — making the Optional<T> inference
// problem visible at run time.
extension UnsafePointer {
var printingPointee: Pointee {
print(Pointee.self) //<- Check how Swift inferred `Pointee`
return self.pointee
}
}
And modify your EDIT code a little:
// Same reproduction as above, instrumented with print(T.self) and
// printingPointee so the console shows that `T` is Double while the
// pointee is inferred as Optional<Double> (9 bytes read from an
// 8-byte buffer).
class Test {
public func read(chunkSize: Int) -> Data {
return Data(repeating: 1, count: chunkSize)
}
public func readNumber<T>() -> T? {
let data: Data = read(chunkSize: MemoryLayout<T>.size)
if data.count == 0 {
return nil
}
print(T.self) //<- Check how Swift inferred `T`
return data.withUnsafeBytes { $0.printingPointee }
}
public func example() {
let value2: Double = readNumber()!
print(value2)
}
}
let test = Test()
for _ in 0..<1000 {
test.example()
}
Output:
Double
Optional<Double>
7.748604185489348e-304
Double
Optional<Double>
Thread 1: Fatal error: Unexpectedly found nil while unwrapping an
Optional value
How many pairs of Double and Optional<Double> shown would be seemingly random, but the cause of this behavior is quite clear.
In this line return data.withUnsafeBytes { $0.printingPointee }, Swift infers the type of $0 as UnsafePointer<Optional<Double>>.
In the current implementation of Swift, Optional<Double> occupies 9 bytes in memory:
print(MemoryLayout<Optional<Double>>.size) //-> 9
So, $0.pointee accesses 9 bytes starting from the pointer, although the pointer is pointing to the region of 8-byte:
|+0|+1|+2|+3|+4|+5|+6|+7|+8|
+--+--+--+--+--+--+--+--+
01 01 01 01 01 01 01 01 ??
<-taken from the Data->
As you know, the extra 9th (+8) byte cannot be predictable and may seemingly be random, which is an indicator of nil in Optional<Double>.
Exactly the same inference is working in your code. In your readNumber<T>(), the return type is clearly declared as T?, so, in the line return data.withUnsafeBytes { $0.pointee }, it is very natural that Swift infers the type of $0.pointee as Double? aka Optional<Double>.
You know you can control this type inference with adding as T.

Compiler issues when chaining multiple calls to the same method

I'm trying to fix this bug by overloading prefix(_ maxLength) for all lazy sequences and collections, but I'm running into weird compiler issues.
I'm using Xcode 9.0 beta 6 (9M214v), but it's also reproducable in all of the latest snapshots for 4.0.
Given the following iterator,...
// Iterator that yields at most `maxLength` elements from `baseIterator`.
public struct LazyPrefixIterator <Base: IteratorProtocol>: IteratorProtocol {
public typealias Element = Base.Element
private var baseIterator: Base
private let maxLength: Int
// Number of elements handed out so far.
private var taken = 0
internal init (_ baseIterator: Base, _ maxLength: Int) {
precondition(maxLength >= 0, "Can't take a prefix of negative length from an iterator")
self.baseIterator = baseIterator
self.maxLength = maxLength
}
// Stops after `maxLength` elements, or earlier if the base runs dry.
public mutating func next () -> Element? {
if self.taken >= self.maxLength {
return nil
}
self.taken += 1
return self.baseIterator.next()
}
}
...the following sequence,...
// Lazy sequence wrapper that limits its base sequence to `maxLength`
// elements; the limit is enforced by LazyPrefixIterator at iteration time.
public struct LazyPrefixSequence <Base: Sequence>: LazySequenceProtocol {
public typealias Iterator = LazyPrefixIterator<Base.Iterator>
private let baseSequence: Base
private let maxLength: Int
internal init (_ baseSequence: Base, _ maxLength: Int) {
precondition(maxLength >= 0, "Can't take a prefix of negative length from a sequence")
self.baseSequence = baseSequence
self.maxLength = maxLength
}
public func makeIterator() -> Iterator {
return LazyPrefixIterator(self.baseSequence.makeIterator(), self.maxLength)
}
}
...the following collection...
// Lazy collection wrapper that limits its base collection to
// `maxLength` elements while sharing the base's indices.
public struct LazyPrefixCollection <Base: Collection>: LazyCollectionProtocol {
public typealias Iterator = LazyPrefixIterator<Base.Iterator>
public typealias Index = Base.Index
public typealias Element = Base.Element
private let baseCollection: Base
private let maxLength: Int
internal init (_ baseCollection: Base, _ maxLength: Int) {
precondition(maxLength >= 0, "Can't take a prefix of negative length from a collection")
self.baseCollection = baseCollection
self.maxLength = maxLength
}
public func makeIterator() -> Iterator {
return LazyPrefixIterator(self.baseCollection.makeIterator(), self.maxLength)
}
public var startIndex: Index {
return self.baseCollection.startIndex
}
// O(maxLength): walks forward from startIndex, stopping early if the
// base collection is shorter than the requested prefix.
public var endIndex: Index {
var maxLength = 0
var index = self.baseCollection.startIndex
let baseCollectionEndIndex = self.baseCollection.endIndex
while maxLength < self.maxLength && index != baseCollectionEndIndex {
index = self.baseCollection.index(after: index)
maxLength += 1
}
return index
}
// NOTE(review): this precondition recomputes endIndex (O(maxLength))
// on every advance, making full traversal quadratic.
public func index (after i: Index) -> Index {
precondition(i != self.endIndex, "Can't advance past endIndex")
return self.baseCollection.index(after: i)
}
public subscript (position: Index) -> Element {
precondition(position >= self.startIndex && position < self.endIndex, "Index out of range")
return self.baseCollection[position]
}
}
...and the following overloads (to squash ambiguity issues),...
// Overloads of prefix(_:) for each lazy wrapper so that the lazy
// implementation wins overload resolution over the standard, eager
// Sequence.prefix(_:) (these are the "ambiguity-squashing" overloads
// mentioned above).
public extension LazySequence {
func prefix (_ maxLength: Int) -> LazyPrefixSequence<Elements> {
return LazyPrefixSequence(self.elements, maxLength)
}
}
public extension LazySequenceProtocol {
func prefix (_ maxLength: Int) -> LazyPrefixSequence<Self> {
return LazyPrefixSequence(self, maxLength)
}
}
public extension LazyCollection {
func prefix (_ maxLength: Int) -> LazyPrefixCollection<Base> {
return LazyPrefixCollection(self.elements, maxLength)
}
}
public extension LazyCollectionProtocol {
func prefix (_ maxLength: Int) -> LazyPrefixCollection<Self> {
return LazyPrefixCollection(self, maxLength)
}
}
public extension LazyDropWhileBidirectionalCollection {
func prefix (_ maxLength: Int) -> LazyPrefixCollection<LazyDropWhileBidirectionalCollection<Base>> {
return LazyPrefixCollection(self, maxLength)
}
}
public extension LazyPrefixWhileBidirectionalCollection {
func prefix (_ maxLength: Int) -> LazyPrefixCollection<LazyPrefixWhileBidirectionalCollection<Base>> {
return LazyPrefixCollection(self, maxLength)
}
}
public extension LazyRandomAccessCollection {
func prefix (_ maxLength: Int) -> LazyPrefixCollection<LazyRandomAccessCollection<Base>> {
return LazyPrefixCollection(self, maxLength)
}
}
...the following works as expected (each one of these prints true)...
print(Array(AnySequence(sequence(first: 0, next: {$0 + 1})).lazy.prefix(2)) == [0, 1])
print(Array(sequence(first: 0, next: {$0 + 1}).lazy.drop(while: {_ in false}).prefix(2)) == [0, 1])
print(Array(sequence(first: 0, next: {$0 + 1}).lazy.filter{_ in true}.prefix(2)) == [0, 1])
print(Array(sequence(first: 0, next: {$0 + 1}).lazy.map{$0}.prefix(2)) == [0, 1])
print(Array(sequence(first: 0, next: {$0 + 1}).lazy.prefix(while: {_ in true}).prefix(2)) == [0, 1])
print(Array(sequence(first: 0, next: {$0 + 1}).lazy.prefix(2)) == [0, 1])
print(Array(AnyCollection([0, 1, 2]).lazy.prefix(2)) == [0, 1])
print(Array(([0, 1, 2].lazy as LazyBidirectionalCollection).prefix(2)) == [0, 1])
print(Array([0, 1, 2].lazy.drop(while: {_ in false}).prefix(2)) == [0, 1])
print(Array(([0, 1, 2].lazy.drop(while: {_ in false}) as LazyDropWhileCollection).prefix(2)) == [0, 1])
print(Array([0, 1, 2].lazy.filter{_ in true}.prefix(2)) == [0, 1])
print(Array(([0, 1, 2].lazy.filter{_ in true} as LazyFilterCollection).prefix(2)) == [0, 1])
print(Array(([0, 1, 2].lazy.map{$0} as LazyMapBidirectionalCollection).prefix(2)) == [0, 1])
print(Array(([0, 1, 2].lazy.map{$0} as LazyMapCollection).prefix(2)) == [0, 1])
print(Array([0, 1, 2].lazy.map{$0}.prefix(2)) == [0, 1])
print(Array([0, 1, 2].lazy.prefix(while: {_ in true}).prefix(2)) == [0, 1])
print(Array(([0, 1, 2].lazy.prefix(while: {_ in true}) as LazyPrefixWhileCollection).prefix(2)) == [0, 1])
print(Array([0, 1, 2].lazy.prefix(2)) == [0, 1])
..., but, when chaining the method multiple times on a collection, weird compiler behaviour occurs. The following works with a return type of LazyPrefixCollection<LazyRandomAccessCollection<[Int]>>:
_ = [0, 1, 2].lazy.prefix(3)
The following works too, with a return type of LazyPrefixCollection<LazyPrefixCollection<LazyRandomAccessCollection<[Int]>>>:
_ = [0, 1, 2].lazy.prefix(3).prefix(3)
But once we add another method, it hiccups. It tells me that Expression type '()' is ambiguous without more context:
_ = [0, 1, 2].lazy.prefix(3).prefix(3).prefix(3)
If we add another one it segment faults while type-checking:
_ = [0, 1, 2].lazy.prefix(3).prefix(3).prefix(3).prefix(3)
Of course, creating intermediate variables for each 'step' works:
let a = [0, 1, 2].lazy.prefix(3)
let b = a.prefix(3)
let c = b.prefix(3)
let d = c.prefix(3)
// Etc.
It's also worth noting that it works when we use a sequence instead of a collection:
_ = sequence(first: 0, next: {(e: Int) -> Int in e + 1}).lazy.prefix(3).prefix(3).prefix(3).prefix(3).prefix(3)
Chaining multiple maps or any of the other methods from the standard library on a collection doesn't cause any issues. The compiler gladly accepts this monstrosity:
_ = [0, 1, 2].lazy.map{$0}.map{$0}.map{$0}.map{$0}.map{$0}.map{$0}
Which makes me believe I'm doing something wrong in my code, particularly in LazyPrefixCollection.
What could be causing this behaviour?
Adding overloads for prefix(_ maxLength) on LazyPrefixSequence and LazyPrefixCollection makes all compiler issues go away. The code then becomes the following:
// Iterator that yields at most `maxLength` elements from `baseIterator`
// (unchanged from the question's version).
public struct LazyPrefixIterator <Base: IteratorProtocol>: IteratorProtocol {
public typealias Element = Base.Element
private var baseIterator: Base
private let maxLength: Int
// Number of elements handed out so far.
private var taken = 0
internal init (_ baseIterator: Base, _ maxLength: Int) {
precondition(maxLength >= 0, "Can't take a prefix of negative length from an iterator")
self.baseIterator = baseIterator
self.maxLength = maxLength
}
public mutating func next () -> Element? {
if self.taken >= self.maxLength {
return nil
}
self.taken += 1
return self.baseIterator.next()
}
}
// Lazy sequence wrapper limiting its base to `maxLength` elements.
public struct LazyPrefixSequence <Base: Sequence>: LazySequenceProtocol {
public typealias Iterator = LazyPrefixIterator<Base.Iterator>
private let baseSequence: Base
private let maxLength: Int
internal init (_ baseSequence: Base, _ maxLength: Int) {
precondition(maxLength >= 0, "Can't take a prefix of negative length from a sequence")
self.baseSequence = baseSequence
self.maxLength = maxLength
}
public func makeIterator() -> Iterator {
return LazyPrefixIterator(self.baseSequence.makeIterator(), self.maxLength)
}
}
// The overload that fixes the chaining problem for sequences: instead
// of wrapping another LazyPrefixSequence around self, it rebuilds one
// over the same base with the smaller of the two lengths, so the type
// never nests no matter how many prefix(_:) calls are chained.
public extension LazyPrefixSequence where Base.SubSequence: Sequence {
func prefix (_ maxLength: Int) -> LazyPrefixSequence {
return LazyPrefixSequence(self.baseSequence, Swift.min(self.maxLength, maxLength))
}
}
// Lazy collection wrapper limiting its base to `maxLength` elements.
public struct LazyPrefixCollection <Base: Collection>: LazyCollectionProtocol {
public typealias Iterator = LazyPrefixIterator<Base.Iterator>
public typealias Index = Base.Index
public typealias Element = Base.Element
private let baseCollection: Base
private let maxLength: Int
internal init (_ baseCollection: Base, _ maxLength: Int) {
precondition(maxLength >= 0, "Can't take a prefix of negative length from a collection")
self.baseCollection = baseCollection
self.maxLength = maxLength
}
public func makeIterator() -> Iterator {
return LazyPrefixIterator(self.baseCollection.makeIterator(), self.maxLength)
}
public var startIndex: Index {
return self.baseCollection.startIndex
}
// O(maxLength): walks forward from startIndex, stopping early when the
// base collection is shorter than the requested prefix.
public var endIndex: Index {
var maxLength = 0
var index = self.baseCollection.startIndex
while maxLength < self.maxLength && index != self.baseCollection.endIndex {
index = self.baseCollection.index(after: index)
maxLength += 1
}
return index
}
public func index (after i: Index) -> Index {
precondition(i != self.endIndex, "Can't advance past endIndex")
return self.baseCollection.index(after: i)
}
public subscript (position: Index) -> Element {
precondition(position >= self.startIndex && position < self.endIndex, "Index out of range")
return self.baseCollection[position]
}
// The overload that makes the compiler issues go away: collapses
// nested prefixes by keeping the smaller of the two lengths over the
// same base instead of nesting another wrapper type.
public func prefix (_ maxLength: Int) -> LazyPrefixCollection {
return LazyPrefixCollection(self.baseCollection, Swift.min(self.maxLength, maxLength))
}
}
// Entry-point overloads: one prefix(_:) per lazy wrapper so the lazy
// implementation wins overload resolution over the eager
// Sequence.prefix(_:); further chained calls then hit the collapsing
// overloads defined on LazyPrefixSequence/LazyPrefixCollection above.
public extension LazySequence {
func prefix (_ maxLength: Int) -> LazyPrefixSequence<Elements> {
return LazyPrefixSequence(self.elements, maxLength)
}
}
public extension LazySequenceProtocol {
func prefix (_ maxLength: Int) -> LazyPrefixSequence<Self> {
return LazyPrefixSequence(self, maxLength)
}
}
public extension LazyCollection {
func prefix (_ maxLength: Int) -> LazyPrefixCollection<Base> {
return LazyPrefixCollection(self.elements, maxLength)
}
}
public extension LazyCollectionProtocol {
func prefix (_ maxLength: Int) -> LazyPrefixCollection<Self> {
return LazyPrefixCollection(self, maxLength)
}
}
public extension LazyDropWhileBidirectionalCollection {
func prefix (_ maxLength: Int) -> LazyPrefixCollection<LazyDropWhileBidirectionalCollection<Base>> {
return LazyPrefixCollection(self, maxLength)
}
}
public extension LazyPrefixWhileBidirectionalCollection {
func prefix (_ maxLength: Int) -> LazyPrefixCollection<LazyPrefixWhileBidirectionalCollection<Base>> {
return LazyPrefixCollection(self, maxLength)
}
}
public extension LazyRandomAccessCollection {
func prefix (_ maxLength: Int) -> LazyPrefixCollection<LazyRandomAccessCollection<Base>> {
return LazyPrefixCollection(self, maxLength)
}
}
Testing it:
let xs = [0, 1, 2, 3, 4].lazy.prefix(3).prefix(10).prefix(100).prefix(10).prefix(5).prefix(1)
let ys = sequence(first: 0, next: {$0 + 1}).lazy.prefix(3).prefix(10).prefix(100).prefix(10).prefix(5).prefix(1)
print(Array(xs)) // [0]
print(type(of: xs)) // LazyPrefixCollection<LazyRandomAccessCollection<Array<Int>>>
print(Array(ys)) // [0]
print(type(of: ys)) // LazyPrefixSequence<UnfoldSequence<Int, (Optional<Int>, Bool)>>
Feedback appreciated. Especially when it comes to the correct typealiases and where clauses. That stuff still feels like arbitrary voodoo black magic to me; if I don't put in the where Base.SubSequence: Sequence restriction on LazyPrefixSequence then it will ask me for a whole bunch of useless overloads on other methods. Why SubSequence doesn't conform to Sequence makes no sense to me.

Convert a Double to Hex notation in Swift

How do I convert a very large number into hex?
For example, 647751843213568900000 in hex is 0x231d5cd577654ceab3. I'm able to easily go from hex to double with:
let hex: Double = 0x231d5cd577654ceab3
However I can't work out how to go from Double back to hex. What am I missing?
The following does not work as it overflows when stored as an 'Int':
let hexConverted = String(647751843213568900000, radix: 16)
The basic algorithm (Swift 5) is the following:
/// Returns the textual representation of `number` in the given `base`
/// (2...36), built by repeated division; digits are collected
/// least-significant first and reversed at the end.
/// - Note: only meaningful for non-negative input — a negative value
///   skips the loop and yields an empty string, as in the original.
func representationOf<T: FixedWidthInteger>(_ number: T, base: T) -> String {
    // Fix: the original returned "" for zero because the while loop
    // never executed.
    if number == 0 { return "0" }
    var buffer: [Int] = []
    var n = number
    while n > 0 {
        buffer.append(Int(n % base))
        n /= base
    }
    return buffer
        .reversed()
        .map { String($0, radix: Int(base)) }
        .joined()
}
print(representationOf(647751843213568900, base: 16))
Of course, this is what String(_:radix:) is doing so there is no need for us to implement it by ourselves.
Your real problem is not the encoding but the representation of big integers.
There are multiple implementations out there already, for example https://github.com/mkrd/Swift-Big-Integer. Some of them already have functions for hex encoding.
In Swift 4 it will be possible to declare your own implementation of higher IntXXX (conforming to FixedWidthInteger) and the problem will become a bit easier:
typealias Int128 = DoubleWidth<Int64>
typealias Int256 = DoubleWidth<Int128>
let longNumber = Int256("231d5cd577654ceab3", radix: 16)!
print(longNumber)
print(String(longNumber, radix: 16))
But unfortunately, the DoubleWidth is not implemented in Xcode 9 Beta 4 yet.
For some values your can also use the Decimal type. Using the algorithm written above:
extension Decimal {
/// Rounds to zero fractional digits using the given rounding mode.
func rounded(mode: NSDecimalNumber.RoundingMode) -> Decimal {
var this = self
var result = Decimal()
NSDecimalRound(&result, &this, 0, mode)
return result
}
/// Integer division truncating toward zero: rounds the quotient up for
/// negative results and down for positive ones.
func integerDivisionBy(_ operand: Decimal) -> Decimal{
let result = (self / operand)
return result.rounded(mode: result < 0 ? .up : .down)
}
/// Remainder consistent with integerDivisionBy:
/// self - (self div operand) * operand.
func truncatingRemainder(dividingBy operand: Decimal) -> Decimal {
return self - self.integerDivisionBy(operand) * operand
}
}
extension Decimal {
/// Parses `string` as an unsigned integer in the given `base` by
/// accumulating digit-by-digit (Horner's method).
/// NOTE(review): force-unwraps each digit conversion — any character
/// that is not a valid digit in `base` crashes; a failable initializer
/// would be safer for untrusted input.
init(_ string: String, base: Int) {
var decimal: Decimal = 0
let digits = Array(string)
.map { String($0) }
.map { Int($0, radix: base)! }
for digit in digits {
decimal *= Decimal(base)
decimal += Decimal(digit)
}
// Round-trips through the description string to normalize the value.
self.init(string: decimal.description)!
}
}
/// Decimal analogue of representationOf(_:base:) — renders `number` in
/// the given `base` via repeated division. Relies on the
/// integerDivisionBy/truncatingRemainder helpers defined in the Decimal
/// extension above.
func representationOf(_ number: Decimal, base: Decimal) -> String {
    // Fix: the original returned "" for zero because the loop never ran.
    if number == 0 { return "0" }
    var buffer: [Int] = []
    var n = number
    while n > 0 {
        buffer.append((n.truncatingRemainder(dividingBy: base) as NSDecimalNumber).intValue)
        n = n.integerDivisionBy(base)
    }
    return buffer
        .reversed()
        .map { String($0, radix: (base as NSDecimalNumber).intValue ) }
        .joined()
}
let number = Decimal("231d5cd577654ceab3", base: 16)
print(number) // 647751843213568961203
print(representationOf(number, base: 16)) // 231d5cd577654ceab3
Note how your value got truncated when converted to Double.
here is my solution :
/// Converts a non-negative, integral Double to an uppercase hexadecimal
/// string via repeated division by 16.
/// - Important: Double only carries 53 bits of precision, so values
///   above 2^53 are rounded before conversion (the question's
///   647751843213568900000 prints as ...0000 for exactly this reason).
/// - Note: fixes two issues in the original: `%` is unavailable for
///   Double in Swift 3+ (replaced with truncatingRemainder), and a
///   negative input made the floor-division loop spin forever — it is
///   now rejected up front.
func toHex(number : Double) -> String
{
    precondition(number >= 0 && number.isFinite, "toHex requires a finite, non-negative value")
    var n = number
    var digits: [Double] = []
    // Collect digits least-significant first; repeat-while emits a
    // single "0" digit for input 0, matching the original behavior.
    repeat {
        digits.append(n.truncatingRemainder(dividingBy: 16))
        n = (n / 16.0).rounded(.down)
    } while n != 0.0
    var hex = ""
    for digit in digits.reversed() {
        // 0...15 -> "0"..."F", mirroring the original hexChar mapping.
        hex += String(Int(digit), radix: 16).uppercased()
    }
    return hex
}
/// Maps a digit value to its uppercase hex character: 10...15 become
/// "A"..."F"; everything else (including 0...9 and any non-integral or
/// out-of-range value) falls back to its truncated decimal spelling,
/// exactly as the original switch's default arm did.
func hexChar(n : Double) -> String
{
    let letters: [Double: String] = [10: "A", 11: "B", 12: "C", 13: "D", 14: "E", 15: "F"]
    if let letter = letters[n] {
        return letter
    }
    return String(Int(n))
}
toHex(647751843213568900000.0); //231D5CD577654C0000

Bit field larger than 64 shifts in Swift?

How would I construct an OptionSetType with a raw value greater than 64 bit shifts (i.e. Int64) that is still able to be encoded using NSCoder? I have more than 64 potential bitwise options to combine.
So I eventually had to create my own primitive struct which was a pain in the ass, since the library #appzYourLife provided does not actually meet every protocol required of UnsignedIntegerTypes. The following is an extension I wrote that actually allows me to write things like
let a: UInt256 = 30
let b: UInt256 = 1 << 98
print(a + b)
which would output to the console:
0x00000000:00000000:00000000:00000000:00000004:00000000:00000000:0000001E
The extension is pretty lengthy and does not yet implement multiplication and division, or bit-shifting by numbers other than 1. This version also supports encoding with an NSCoder.
//
// UInt256.swift
// NoodleKit
//
// Created by NoodleOfDeath on 7/10/16.
// Copyright © 2016 NoodleOfDeath. All rights reserved.
//
import Foundation
// Bit Shifting only supports lhs = 1
// Left shift across eight 32-bit limbs (parts[0] is most significant).
// Only the low limb of `rhs` (parts[7]) supplies the shift amount:
// `shift` is the within-limb offset, `offset` the whole-limb move.
#warn_unused_result
public func << (lhs: UInt256, rhs: UInt256) -> UInt256 {
if lhs > 1 { print("Warning: Only supports binary bitshifts (i.e. 1 << n, where n < 256. Shifting any other numbers than 1 may result in unexpected behavior.") }
if rhs > 255 { fatalError("shift amount is larger than type size in bits") }
let shift = UInt64(rhs.parts[7]) % 32
let offset = Int(rhs.parts[7] / 32)
var parts = [UInt32]()
for i in (0 ..< 8) {
let part: UInt64 = (i + offset < 8 ? UInt64(lhs.parts[i + offset]) : 0)
// NOTE(review): UInt32(part << shift) traps if shifted bits land above
// bit 31 — safe only for the advertised lhs == 1 case, since cross-limb
// carry is not propagated.
let sum32 = UInt32(part << shift)
parts.append(sum32)
}
return UInt256(parts)
}
// Right shift across eight 32-bit limbs; same limitations as <<.
#warn_unused_result
public func >> (lhs: UInt256, rhs: UInt256) -> UInt256 {
if lhs > 1 { print("Warning: Only supports binary bitshifts (i.e. 1 << n, where n < 256. Shifting any other numbers than 1 may result in unexpected behavior.") }
if rhs > 255 { fatalError("shift amount is larger than type size in bits") }
let shift = UInt64(rhs.parts[7]) % 32
let offset = Int(rhs.parts[7] / 32)
var parts = [UInt32]()
for i in (0 ..< 8) {
// NOTE(review): `i - offset > 0` excludes limb 0 even when offset == 0,
// so the most significant limb is always dropped — should likely be
// `>= 0`. Harmless only for the advertised lhs == 1 case.
let part: UInt64 = (i - offset > 0 ? UInt64(lhs.parts[i - offset]) : 0)
let sum32 = UInt32(part >> shift)
parts.append(sum32)
}
return UInt256(parts)
}
// Two UInt256 values are equal iff all eight limbs match.
#warn_unused_result
public func == (lhs: UInt256, rhs: UInt256) -> Bool {
return lhs.parts == rhs.parts
}
// Lexicographic comparison of the eight limbs, most significant first.
// Fix: the original returned true at the *first* index where
// lhs.parts[i] < rhs.parts[i], even when a more significant limb had
// already established lhs > rhs (e.g. parts [2,0,...] vs [1,5,...]).
// The comparison must be decided at the first differing limb.
#warn_unused_result
public func < (lhs: UInt256, rhs: UInt256) -> Bool {
for i in 0 ..< 8 {
if lhs.parts[i] != rhs.parts[i] {
return lhs.parts[i] < rhs.parts[i]
}
}
return false
}
// Lexicographic comparison of the eight limbs, most significant first.
// Fix: same defect as < — the original could report lhs > rhs based on
// a low limb even when a more significant limb said otherwise; decide
// at the first differing limb instead.
#warn_unused_result
public func > (lhs: UInt256, rhs: UInt256) -> Bool {
for i in 0 ..< 8 {
if lhs.parts[i] != rhs.parts[i] {
return lhs.parts[i] > rhs.parts[i]
}
}
return false
}
// Derived comparisons; correctness depends on < and > above.
#warn_unused_result
public func <= (lhs: UInt256, rhs: UInt256) -> Bool {
return lhs < rhs || lhs == rhs
}
#warn_unused_result
public func >= (lhs: UInt256, rhs: UInt256) -> Bool {
return lhs > rhs || lhs == rhs
}
/// Adds `lhs` and `rhs`, returning the result and trapping in case of
/// arithmetic overflow (except in -Ounchecked builds).
#warn_unused_result
public func + (lhs: UInt256, rhs: UInt256) -> UInt256 {
var parts = [UInt32]()
var carry = false
// Walk the 32-bit limbs least-significant (index 7) first.
for i in (0 ..< 8).reverse() {
let lpart = UInt64(lhs.parts[i])
let rpart = UInt64(rhs.parts[i])
// Fix: the original added an extra 1 whenever both limbs were
// UInt32.max (its "comp" term), double-counting — lpart + rpart + 1
// always fits in UInt64, so the incoming carry alone is sufficient.
let sum64 = lpart + rpart + (carry ? 1 : 0)
// Keep the low 32 bits; the high bit of sum64 is the carry out.
let sum32 = UInt32(sum64 & 0xFFFFFFFF)
carry = sum64 > UInt64(UInt32.max)
parts.insert(sum32, atIndex: 0)
}
return UInt256(parts)
}
/// Adds `lhs` and `rhs`, returning the result and trapping in case of
/// arithmetic overflow (except in -Ounchecked builds).
/// In-place form; simply delegates to the + operator.
public func += (inout lhs: UInt256, rhs: UInt256) {
lhs = lhs + rhs
}
/// Subtracts `rhs` from `lhs`, returning the result and trapping in case
/// of arithmetic overflow (except in -Ounchecked builds).
#warn_unused_result
public func - (lhs: UInt256, rhs: UInt256) -> UInt256 {
var parts = [UInt32]()
var borrow = false
// Walk the 32-bit limbs least-significant (index 7) first.
for i in (0 ..< 8).reverse() {
let lpart = UInt64(lhs.parts[i])
// Fold the previous limb's borrow into the amount being subtracted.
let rpart = UInt64(rhs.parts[i]) + (borrow ? 1 : 0)
// Fixes two defects in the original: borrowing added UInt32.max
// (2^32 - 1) instead of 2^32, and `lhs.parts[i] - 1` could underflow
// UInt64 when a borrowed-from limb was zero.
borrow = lpart < rpart
let diff = (borrow ? lpart + 0x1_0000_0000 : lpart) - rpart
parts.insert(UInt32(diff), atIndex: 0)
}
return UInt256(parts)
}
// In-place subtraction; delegates to the - operator.
public func -= (inout lhs: UInt256, rhs: UInt256) {
lhs = lhs - rhs
}
/// Multiplies `lhs` and `rhs`, returning the result and trapping in case of
/// arithmetic overflow (except in -Ounchecked builds).
/// - Complexity: O(64)
/// NOTE(review): unimplemented stub — always returns zero.
#warn_unused_result
public func * (lhs: UInt256, rhs: UInt256) -> UInt256 {
// TODO: - Not Implemented
return UInt256()
}
public func *= (inout lhs: UInt256, rhs: UInt256) {
lhs = lhs * rhs
}
/// Divides `lhs` and `rhs`, returning the result and trapping in case of
/// arithmetic overflow (except in -Ounchecked builds).
/// NOTE(review): unimplemented stub — always returns zero.
#warn_unused_result
public func / (lhs: UInt256, rhs: UInt256) -> UInt256 {
// TODO: - Not Implemented
return UInt256()
}
public func /= (inout lhs: UInt256, rhs: UInt256) {
lhs = lhs / rhs
}
/// Divides `lhs` and `rhs`, returning the remainder and trapping in case of
/// arithmetic overflow (except in -Ounchecked builds).
/// NOTE(review): unimplemented stub — always returns zero.
#warn_unused_result
public func % (lhs: UInt256, rhs: UInt256) -> UInt256 {
// TODO: - Not Implemented
return UInt256()
}
public func %= (inout lhs: UInt256, rhs: UInt256) {
lhs = lhs % rhs
}
public extension UInt256 {
/// Truncates to the least-significant 64 bits, reinterpreted as signed.
#warn_unused_result
public func toIntMax() -> IntMax {
// Fix: the original wrote `parts[6] << 32`, shifting a 32-bit value by
// its full width (always zero); the limb must be widened to 64 bits
// *before* shifting.
return IntMax(bitPattern: (UInt64(parts[6]) << 32) | UInt64(parts[7]))
}
/// Truncates to the least-significant 64 bits.
#warn_unused_result
public func toUIntMax() -> UIntMax {
// Same fix as toIntMax(): widen before shifting.
return (UInt64(parts[6]) << 32) | UInt64(parts[7])
}
/// Adds `lhs` and `rhs`, returning the result and a `Bool` that is
/// `true` iff the operation caused an arithmetic overflow.
public static func addWithOverflow(lhs: UInt256, _ rhs: UInt256) -> (UInt256, overflow: Bool) {
var parts = [UInt32]()
var carry = false
for i in (0 ..< 8).reverse() {
let lpart = UInt64(lhs.parts[i])
let rpart = UInt64(rhs.parts[i])
// The 64-bit sum cannot overflow, so the incoming carry alone is
// sufficient (the original's "comp" term double-counted when both
// limbs were UInt32.max).
let sum64 = lpart + rpart + (carry ? 1 : 0)
parts.insert(UInt32(sum64 & 0xFFFFFFFF), atIndex: 0)
carry = sum64 > UInt64(UInt32.max)
}
// Fix: overflow is a carry out of the most-significant limb, not the
// original's `parts[0] > 0x8fffffff` magic comparison.
return (UInt256(parts), carry)
}
/// Subtracts `rhs` from `lhs`, returning the result and a `Bool` that is
/// `true` iff the operation caused an arithmetic overflow (rhs > lhs).
public static func subtractWithOverflow(lhs: UInt256, _ rhs: UInt256) -> (UInt256, overflow: Bool) {
var parts = [UInt32]()
var borrow = false
for i in (0 ..< 8).reverse() {
let lpart = UInt64(lhs.parts[i])
// Fold the previous limb's borrow into the amount being subtracted.
let rpart = UInt64(rhs.parts[i]) + (borrow ? 1 : 0)
// Fix: borrowing must add 2^32 (not UInt32.max), and the original's
// `- (gave ? 1 : 0)` could underflow a zero limb.
borrow = lpart < rpart
parts.insert(UInt32((borrow ? lpart + 0x1_0000_0000 : lpart) - rpart), atIndex: 0)
}
// A borrow out of the most-significant limb means rhs > lhs.
return (UInt256(parts), borrow)
}
/// Multiplies `lhs` and `rhs`, returning the result and a `Bool` that is
/// `true` iff the operation caused an arithmetic overflow.
/// NOTE(review): unimplemented stub, as in the original.
public static func multiplyWithOverflow(lhs: UInt256, _ rhs: UInt256) -> (UInt256, overflow: Bool) {
// TODO: - Not Implemented
return (UInt256(), false)
}
/// Divides `lhs` and `rhs`, returning the result and a `Bool` that is
/// `true` iff the operation caused an arithmetic overflow.
/// NOTE(review): unimplemented stub, as in the original.
public static func divideWithOverflow(lhs: UInt256, _ rhs: UInt256) -> (UInt256, overflow: Bool) {
// TODO: - Not Implemented
return (UInt256(), false)
}
/// Divides `lhs` and `rhs`, returning the remainder and a `Bool` that is
/// `true` iff the operation caused an arithmetic overflow.
/// NOTE(review): unimplemented stub, as in the original.
public static func remainderWithOverflow(lhs: UInt256, _ rhs: UInt256) -> (UInt256, overflow: Bool) {
// TODO: - Not Implemented
return (UInt256(), false)
}
}
/// A 256-bit unsigned integer stored as eight 32-bit words, most significant
/// first (`parts[0]` is the top word, `parts[7]` the bottom word).
/// Written against the Swift 2 standard library (`UnsignedIntegerType`,
/// `#warn_unused_result`, `sizeof`, `replaceRange`).
public struct UInt256 : UnsignedIntegerType, Comparable, Equatable {
public typealias IntegerLiteralType = UInt256
// Distance/Stride are 32-bit signed — see the review notes on distanceTo/advancedBy.
public typealias Distance = Int32
public typealias Stride = Int32
// Backing storage: always exactly 8 words (see init()).
private let parts: [UInt32]
private var part0: UInt32 { return parts[0] }
private var part1: UInt32 { return parts[1] }
private var part2: UInt32 { return parts[2] }
private var part3: UInt32 { return parts[3] }
private var part4: UInt32 { return parts[4] }
private var part5: UInt32 { return parts[5] }
private var part6: UInt32 { return parts[6] }
private var part7: UInt32 { return parts[7] }
// All 256 bits set.
public static var max: UInt256 {
return UInt256([.max, .max, .max, .max, .max, .max, .max, .max])
}
// Hex dump of the words, most significant first, e.g. "0x00000000:…:0000002A".
public var description: String {
var hex = "0x"
for i in 0 ..< parts.count {
let part = parts[i]
hex += String(format:"%08X", part)
if i + 1 < parts.count {
hex += ":"
}
}
return "\(hex)"
}
// The raw word array, e.g. "[0, 0, 0, 0, 0, 0, 0, 42]".
public var componentDescription: String {
return "\(parts)"
}
// NOTE(review): the sum of eight word hash values can exceed Int.max on a
// 32-bit platform, where `+` traps on overflow — verify on 32-bit targets.
public var hashValue: Int {
return (part0.hashValue + part1.hashValue + part2.hashValue + part3.hashValue + part4.hashValue + part5.hashValue + part6.hashValue + part7.hashValue).hashValue
}
// 32-byte dump of the eight words in declaration order.
// NOTE(review): each UInt32 is written in host byte order — confirm this
// matches what init?(data:) readers on other platforms expect.
public var data: NSData {
let bytes = [part0, part1, part2, part3, part4, part5, part6, part7]
return NSData(bytes: bytes, length: 32)
}
// Integer-literal support: literals are routed through UInt64, so only
// literals that fit in 64 bits are expressible.
public init(_builtinIntegerLiteral builtinIntegerLiteral: _MaxBuiltinIntegerType) {
self.init(UInt64(_builtinIntegerLiteral: builtinIntegerLiteral))
}
// Zero.
public init() { parts = [0, 0, 0, 0, 0, 0, 0, 0] }
// Right-aligns `newParts` into the 8-word storage (missing high words are 0).
// NOTE(review): passing more than 8 words makes `8 - newParts.count`
// negative and the range invalid — this would trap; verify callers.
public init(_ newParts: [UInt32]) {
var zeros = UInt256().parts
zeros.replaceRange((8 - newParts.count ..< 8), with: newParts)
parts = zeros
}
// All fixed-width inits funnel through UInt64, so negative signed inputs
// trap in the UInt64 conversion.
public init(_ v: Int8) {
self.init(UInt64(v))
}
public init(_ v: UInt8) {
self.init(UInt64(v))
}
public init(_ v: Int16) {
self.init(UInt64(v))
}
public init(_ v: UInt16) {
self.init(UInt64(v))
}
public init(_ v: Int32) {
self.init(UInt64(v))
}
public init(_ v: UInt32) {
self.init(UInt64(v))
}
public init(_ v: Int) {
self.init(UInt64(v))
}
public init(_ v: UInt) {
self.init(UInt64(v))
}
public init(_ v: Int64) {
self.init(UInt64(v))
}
// Splits the 64-bit value into the two lowest words (high 192 bits zero).
public init(_ v: UInt64) {
self.init([UInt32(v >> 32), UInt32((v << 32) >> 32)])
}
public init(integerLiteral value: IntegerLiteralType) {
parts = value.parts
}
// Reads eight 32-bit words from the first 32 bytes of `data` (inverse of
// the `data` property for same-endianness round trips).
// NOTE(review): getBytes does not report short data, so the guard below can
// never fail — a too-short NSData is the real failure mode to check for.
public init?(data: NSData) {
var parts = [UInt32]()
let size = sizeof(UInt32)
for i in 0 ..< 8 {
var part = UInt32()
data.getBytes(&part, range: NSMakeRange(i * size, size))
parts.append(part)
}
guard parts.count == 8 else { return nil }
self.init(parts)
}
#warn_unused_result
public func advancedBy(n: Stride) -> UInt256 {
// NOTE(review): UInt256(n) routes a signed Int32 through UInt64(v), which
// traps for negative strides — verify before using backward strides.
return self + UInt256(n)
}
#warn_unused_result
public func advancedBy(n: Distance, limit: UInt256) -> UInt256 {
// Clamps the advance at `limit`.
// NOTE(review): `limit - UInt256(n)` underflows when limit < n — confirm
// the subtraction operator's wrapping behavior is intended here.
return limit - UInt256(n) > self ? self + UInt256(n) : limit
}
#warn_unused_result
public func distanceTo(end: UInt256) -> Distance {
// NOTE(review): `end - self` is a UInt256 but Distance is Int32 — this
// looks like it cannot type-check as written; verify it compiles.
return end - self
}
/// Returns the previous consecutive value in a discrete sequence.
///
/// If `UInt256` has a well-defined successor,
/// `UInt256.successor().predecessor() == UInt256`. If `UInt256` has a
/// well-defined predecessor, `UInt256.predecessor().successor() ==
/// UInt256`.
///
/// - Requires: `UInt256` has a well-defined predecessor.
#warn_unused_result
public func predecessor() -> UInt256 {
// NOTE(review): advancedBy(-1) builds UInt256(-1) via UInt64(v), which
// traps on negative input — predecessor() looks like it always traps; verify.
return advancedBy(-1)
}
#warn_unused_result
public func successor() -> UInt256 {
return advancedBy(1)
}
}
// Conformance is satisfied by the free `&`, `|`, `^`, `~` operators and the
// `allZeros` property defined below in this file.
extension UInt256 : BitwiseOperationsType {}
/// Returns the intersection of bits set in `lhs` and `rhs`.
///
/// - Complexity: O(1).
#warn_unused_result
public func & (lhs: UInt256, rhs: UInt256) -> UInt256 {
    // Word-wise AND across all eight 32-bit limbs.
    return UInt256((0 ..< 8).map { lhs.parts[$0] & rhs.parts[$0] })
}
/// Returns the union of bits set in `lhs` and `rhs`.
///
/// - Complexity: O(1).
#warn_unused_result
public func | (lhs: UInt256, rhs: UInt256) -> UInt256 {
    // Word-wise OR across all eight 32-bit limbs.
    return UInt256((0 ..< 8).map { lhs.parts[$0] | rhs.parts[$0] })
}
/// Returns the bits that are set in exactly one of `lhs` and `rhs`.
///
/// - Complexity: O(1).
#warn_unused_result
public func ^ (lhs: UInt256, rhs: UInt256) -> UInt256 {
    // Word-wise XOR across all eight 32-bit limbs.
    return UInt256((0 ..< 8).map { lhs.parts[$0] ^ rhs.parts[$0] })
}
/// Returns the bitwise complement of `x` (every bit flipped).
///
/// - Complexity: O(1).
#warn_unused_result
prefix public func ~ (x: UInt256) -> UInt256 {
    // The original returned `x ^ ~UInt256.allZeros`, which invokes this very
    // operator on `allZeros` and recurses forever (stack overflow on first
    // use). XOR against the all-ones constant directly instead.
    return x ^ UInt256.max
}
public extension UInt256 {
// The value with no bits set; identical to UInt256() (all eight words zero).
public static var allZeros: UInt256 {
return UInt256()
}
}
public extension NSCoder {
    /// Archives the value's 32-byte word dump (its `data` property) under `key`.
    public func encodeUInt256(unsignedInteger: UInt256, forKey key: String) {
        let payload = unsignedInteger.data
        encodeObject(payload, forKey: key)
    }
    /// Restores a value archived by `encodeUInt256(_:forKey:)`.
    /// Any failure (missing key, wrong class, malformed payload) yields zero.
    public func decodeUInt256ForKey(key: String) -> UInt256 {
        let archived = decodeObjectForKey(key) as? NSData
        return archived.flatMap { UInt256(data: $0) } ?? UInt256()
    }
}
Disclaimer: I have never tried this myself.
I suppose you can build your own Int128.
E.g. this library defined a UInt256 type.
Once you have your new type you can simply use it with OptionSetType I guess.
// Sketch only: Swift has no built-in Int128 — `Int128` stands for a custom
// 128-bit type you would supply yourself (e.g. built like the UInt256 above).
// `OptionSetType` is the Swift 2 spelling of today's `OptionSet`.
struct YourOptions : OptionSetType{
let rawValue : Int128
init(rawValue:Int128) {
self.rawValue = rawValue
}
}

How to convert a decimal number to binary in Swift?

How can I convert Int to UInt8 in Swift?
Example. I want to convert number 22 to 0b00010110
var decimal = 22
var binary:UInt8 = ??? //What should I write here?
You can convert the decimal value to a human-readable binary representation using the String initializer that takes a radix parameter:
let num = 22
let str = String(num, radix: 2)
print(str) // prints "10110"
If you wanted to, you could also pad it with any number of zeroes pretty easily as well:
Swift 5
/// Left-pads `string` with "0" characters until it is `toSize` characters
/// long; strings already at or beyond `toSize` are returned unchanged.
/// (The original built the range `0..<(toSize - string.count)`, which traps
/// at runtime whenever the string is longer than `toSize`, and prepended one
/// character per iteration — O(n^2).)
func pad(string : String, toSize: Int) -> String {
    guard string.count < toSize else { return string }
    return String(repeating: "0", count: toSize - string.count) + string
}
let num = 22
let str = String(num, radix: 2)
print(str) // 10110
pad(string: str, toSize: 8) // 00010110
Swift 5.1 / Xcode 11
Thanks Gustavo Seidler.
My version of his solution is complemented by spaces for readability.
extension BinaryInteger {
    /// The value's full bit pattern grouped into nibbles, e.g. "0000 1001".
    var binaryDescription: String {
        var binaryString = ""
        var internalNumber = self
        for counter in 1...bitWidth {
            // Prepend the current lowest bit, then consume it.
            binaryString.insert(contentsOf: "\(internalNumber & 1)", at: binaryString.startIndex)
            internalNumber >>= 1
            // Separate nibbles with a space — but not before the most
            // significant group. (The original inserted a space on the final
            // iteration too, producing " 0000 1001" with a stray leading
            // space, contradicting its own documented examples.)
            if counter % 4 == 0 && counter != bitWidth {
                binaryString.insert(contentsOf: " ", at: binaryString.startIndex)
            }
        }
        return binaryString
    }
}
Examples:
UInt8(9).binaryDescription // "0000 1001"
Int8(5).binaryDescription // "0000 0101"
UInt16(1945).binaryDescription // "0000 0111 1001 1001"
Int16(14).binaryDescription // "0000 0000 0000 1110"
Int32(6).binaryDescription // "0000 0000 0000 0000 0000 0000 0000 0110"
UInt32(2018).binaryDescription // "0000 0000 0000 0000 0000 0111 1110 0010"
I modified someone's version for Swift 3.0 and used the correct initializer for creating a string with repeated values
extension String {
    /// Left-pads the string with `character` until it is `length` characters
    /// long; strings already at or beyond `length` are returned unchanged.
    func pad(with character: String, toLength length: Int) -> String {
        // `.characters` was removed in Swift 4.2+; `count` lives on String now.
        let padCount = length - self.count
        guard padCount > 0 else { return self }
        return String(repeating: character, count: padCount) + self
    }
}
String(37, radix: 2).pad(with: "0", toLength: 8) // "00100101"
Since none of the solutions contemplate negative numbers, I came up with a simple solution that basically reads the number's internal representation and pads it automatically to the width of its type. This should work on all BinaryInteger types.
extension BinaryInteger {
    /// A literal-style dump ("0b…") of the value's full two's-complement bit
    /// pattern, padded to the type's bit width (works for negatives too).
    var binaryDescription: String {
        var digits = [Character]()
        var remaining = self
        // Peel off the lowest bit `bitWidth` times, least significant first.
        for _ in 0 ..< bitWidth {
            digits.append(remaining & 1 == 1 ? "1" : "0")
            remaining >>= 1
        }
        // Reverse so the most significant bit leads.
        return "0b" + String(digits.reversed())
    }
}
Examples:
UInt8(22).binaryDescription // "0b00010110"
Int8(60).binaryDescription // "0b00111100"
Int8(-60).binaryDescription // "0b11000100"
Int16(255).binaryDescription // "0b0000000011111111"
Int16(-255).binaryDescription // "0b1111111100000001"
I went through a lot of answers on this post, but I wonder why no one has mentioned the leadingZeroBitCount API on FixedWidthInteger.
This returns the number of leading zero bits in a specific unsigned integer value.
eg:
UInt(4).leadingZeroBitCount //61
UInt16(4).leadingZeroBitCount //13
Swift Version
4.1
USAGE
let strFive = String.binaryRepresentation(of: UInt8(5))
print(strFive) // Prints: 00000101
UNDER THE HOOD
extension String {
    /// The full fixed-width binary representation of `val`, left-padded with
    /// zeros to exactly `F.bitWidth` characters.
    static func binaryRepresentation<F: FixedWidthInteger>(of val: F) -> String {
        // Special-case zero: `String(0, radix: 2)` is "0" while
        // `leadingZeroBitCount` is already the full width, so the original
        // returned bitWidth + 1 characters for zero.
        guard val != 0 else { return String(repeating: "0", count: F.bitWidth) }
        let binaryString = String(val, radix: 2)
        if val.leadingZeroBitCount > 0 {
            return String(repeating: "0", count: val.leadingZeroBitCount) + binaryString
        }
        return binaryString
    }
}
I agree with the others, Although the for-loop seems redundant for repeating a character.
we can simply go with the following String initialiser:
init(count count: Int, repeatedValue c: Character)
usage example:
let string = String(count: 5, repeatedValue: char)
Here is a full example:
// Full example. NOTE(review): this uses Swift 2/3-era APIs — `.characters`
// and `String(count:repeatedValue:)` were removed in later Swift versions;
// modern code would use `str.count` and `String(repeating:count:)`.
let someBits: UInt8 = 0b00001110
let str = String(someBits, radix:2) //binary base
let padd = String(count: (8 - str.characters.count), repeatedValue: Character("0")) //repeat a character
print(padd + str)
If you want binary to have the value of 22, just assign it that: binary = 22 or you could write it as binary = 0b00010110; the two statements are equivalent.
Here's how I would do it:
extension String {
    /// Left-pads the string with `padding` until it is `length` characters
    /// long; strings already at or beyond `length` are returned unchanged.
    public func pad(with padding: Character, toLength length: Int) -> String {
        // `.characters` was removed in Swift 4.2+; `count` lives on String now.
        let paddingWidth = length - self.count
        guard 0 < paddingWidth else { return self }
        return String(repeating: padding, count: paddingWidth) + self
    }
}
String(0b1010, radix: 2).pad(with: "0", toLength: 8) //00001010
So I had this come up recently. The other generic solutions didn't work for me, due to various issues. Anyway, here's my solution (Swift 4):
extension String {
    /// The full binary dump of `value`'s machine words (two's complement),
    /// exactly one character per bit of the word representation.
    init<B: FixedWidthInteger>(fullBinary value: B) {
        self = value.words.reduce(into: "") {
            $0.append(contentsOf: repeatElement("0", count: $1.leadingZeroBitCount))
            // Skip the digit for a zero word: its leadingZeroBitCount already
            // covers the full word width, and appending String(0, radix: 2)
            // would add a stray extra "0" character.
            if $1 != 0 {
                $0.append(String($1, radix: 2))
            }
        }
    }
}
Tests:
// result: 0000000000000000000000000000000000000000000000000000000000001001
String(fullBinary: 9)
// result: 1111111111111111111111111111111111111111111111111111111100000000
String(fullBinary: -256)
// result: 1111111111111111111111111111111111111111111111111101100011110001
String(fullBinary: -9999)
// result: 0000000000000000000000000000000000000000000000000010011100001111
String(fullBinary: 9999)
// result: 1100011000000000000000000000000000000000000011110110100110110101
String(fullBinary: 14267403619510741429 as UInt)
swift 4.1
extension String {
    /// Left-pads the receiver with `padding` until it is `length` characters
    /// long. Strings already at or beyond `length` come back untouched.
    public func pad(with padding: Character, toLength length: Int) -> String {
        let deficit = length - count
        if deficit <= 0 { return self }
        return String(repeating: padding, count: deficit) + self
    }
}
extension UInt8 {
    /// The byte as a fixed 8-character binary string, e.g. 15 -> "00001111".
    /// Relies on the `String.pad(with:toLength:)` helper defined above.
    public func toBits() -> String {
        return String(self, radix: 2).pad(with: "0", toLength: 8)
    }
}
/// Prints each byte in `list` on its own line via the single-value overload.
func showBits( _ list: [UInt8] )
{
    list.forEach { showBits($0) }
}
// Prints one byte as "<decimal> \t<8-bit binary>" using UInt8.toBits()
// (defined earlier in this file), e.g. "15 \t00001111".
func showBits( _ num: UInt8 )
{
//print(num, String( num, radix : 2 ))
print( "\(num) \t" + num.toBits())
}
// Demo: a nibble pattern and its bitwise complement.
let initialBits :UInt8 = 0b00001111
// `~` flips every bit: 0b00001111 -> 0b11110000 (15 -> 240).
let invertedBits = ~initialBits
showBits( [initialBits, invertedBits] )
result
15 00001111
240 11110000
good for you~
There is no difference between binary and decimal numeral systems when you're working with variables, until you want to visualize them, or when you want to convert between types which can hold different amounts of bits.
In your case is enough to write
var decimal = 22
var binary = UInt8(decimal)
But this will crash (overflow happens) if decimal will hold a value more than 255, because it is maximum value which UInt8 can hold.
Depending on what you want to achieve you can write
var decimal = 261 // 0b100000101
var binary = UInt8(truncatingBitPattern: decimal) // 0b00000101
You'll get 5 as a result, because this initializer keeps only the least significant bits that fit and discards the more significant ones.
Second option is
var decimal = 256 // 0b100000000
var binary = UInt8(exactly: decimal) // nil
This initializer returns nil result instead of crashing, if overflow happens.
P.S. If you want to see binary string representation use
String(decimal, radix: 2)
String(binary, radix: 2)
I modified your version to use Swift 2.0's count on strings, and added a length check:
extension String {
    /// Left-pads the string with "0" characters until it is `length`
    /// characters long; strings already at or beyond `length` are returned
    /// unchanged. (Rewritten from the Swift 2 original: `.characters` was
    /// removed in Swift 4.2+, and the per-character prepend loop was O(n^2) —
    /// `String(repeating:count:)` builds the padding in one step.)
    func pad(length: Int) -> String {
        let diff = length - self.count
        guard diff > 0 else { return self }
        return String(repeating: "0", count: diff) + self
    }
}
Most answers here forget to account for 0, and output a representation that is too long.
Based on the answer by #karwag I present:
extension FixedWidthInteger {
    /// The full binary dump of the value's machine words, exactly one
    /// character per bit — including a correct all-zeros string for 0.
    var binaryStringRepresentation: String {
        var result = ""
        for word in words {
            // Leading zeros of this word come first…
            result += String(repeating: "0", count: word.leadingZeroBitCount)
            // …followed by the significant digits, skipped entirely for a
            // zero word (its leading zeros already span the full width).
            if word != 0 {
                result += String(word, radix: 2)
            }
        }
        return result
    }
}
It's a bit overcomplicated, but very fast.
It separates every 4 bits, leaving no white spaces in the string.
/// Nibble-grouped binary description ("0110 1100") for the standard
/// 8/16/32/64-bit widths, with separate signed and unsigned strategies.
extension BinaryInteger {
var binaryDescription: String {
var string = ""
var num = self
// Mask value with only the most significant bit set, per supported width.
// NOTE(review): any other bitWidth falls through to 0, which makes every
// `num & mask == mask` test below trivially true — confirm non-standard
// widths are never used with this property.
let range: UInt64
switch self.bitWidth {
case 8: range = 0x80
case 16: range = 0x8000
case 32: range = 0x80000000
case 64: range = 0x8000000000000000
default: range = 0x0
}
if Self.isSigned {
// A signed Self cannot hold its own MSB mask (`range`), so test the
// next bit down (range / 2, i.e. bit bitWidth-2) instead: park the
// lowest bit aside, shift right once, then repeatedly shift left and
// compare against that mask to read bits most-significant-first.
let mask = Self(range / 2)
let last = num & 1
num >>= 1
for i in 1...self.bitWidth-1 {
string.append("\(num & mask == mask ? 1 : 0)")
num <<= 1
if i % 4 == 0 { string.append(" ") }
}
// The lowest bit, saved before the rotation, is the final character.
string.append("\(last)")
} else { // Unsigned
// Unsigned Self can hold the MSB mask directly: shift left through it.
let mask = Self(range)
for i in 1...self.bitWidth {
string.append("\(num & mask == mask ? 1 : 0)")
num <<= 1
if i % 4 == 0 { string.append(" ") }
}
// The loop appends a trailing space after the last nibble; drop it.
string = String(string.dropLast())
}
return string
}
}
Examples:
UInt8(245).binaryDescription // 1111 0101
Int8(108).binaryDescription // 0110 1100