I'm trying to use the MikroTik API library written in Swift:
https://wiki.mikrotik.com/wiki/API_in_Swift
It works well when I'm sending small commands.
However, if I try to send a large script string, I get this error:
Fatal error: Not enough bits to represent the passed value
The code that crashes:
private func writeLen(_ command : String) -> Data {
    let data = command.data(using: String.Encoding.utf8)
    var len = data?.count ?? 0
    var dat = Data()
    if len < 0x80 {
        dat.append([UInt8(len)], count: 1)
    } else if len < 0x4000 {
        len = len | 0x8000;
        dat.append(Data(bytes: [UInt8(len >> 8)]))
        dat.append(Data(bytes: [UInt8(len)]))
    } else if len < 0x20000 {
        len = len | 0xC00000;
        dat.append(Data(bytes: [UInt8(len >> 16)]))
        dat.append(Data(bytes: [UInt8(len >> 8)]))
        dat.append(Data(bytes: [UInt8(len)]))
    } else if len < 0x10000000 {
        len = len | 0xE0000000;
        dat.append(Data(bytes: [UInt8(len >> 24)]))
        dat.append(Data(bytes: [UInt8(len >> 16)]))
        dat.append(Data(bytes: [UInt8(len >> 8)]))
        dat.append(Data(bytes: [UInt8(len)]))
    } else {
        dat.append(Data(bytes: [0xF0]))
        dat.append(Data(bytes: [UInt8(len >> 24)]))
        dat.append(Data(bytes: [UInt8(len >> 16)]))
        dat.append(Data(bytes: [UInt8(len >> 8)]))
        dat.append(Data(bytes: [UInt8(len)]))
    }
    return dat
}
The fatal error appears in this part:
else if len < 0x4000 {
    len = len | 0x8000;
    dat.append(Data(bytes: [UInt8(len >> 8)]))
    dat.append(Data(bytes: [UInt8(len)]))
}
at line:
dat.append(Data(bytes: [UInt8(len)]))
The Data size at this moment is 1072 bytes and len equals 33840; UInt8 cannot be initialized with that len value.
How can I edit the code to avoid the error?
I'm using Swift 4.2
EDIT:
Here is an example of the same logic but written in JavaScript
module.exports.encodeString = function encodeString(s) {
    var data = null;
    var len = Buffer.byteLength(s);
    var offset = 0;
    if (len < 0x80) {
        data = new Buffer(len + 1);
        data[offset++] = len;
    } else if (len < 0x4000) {
        data = new Buffer(len + 2);
        len |= 0x8000;
        data[offset++] = (len >> 8) & 0xff;
        data[offset++] = len & 0xff;
    } else if (len < 0x200000) {
        data = new Buffer(len + 3);
        len |= 0xC00000;
        data[offset++] = (len >> 16) & 0xff;
        data[offset++] = (len >> 8) & 0xff;
        data[offset++] = len & 0xff;
    } else if (len < 0x10000000) {
        data = new Buffer(len + 4);
        len |= 0xE0000000;
        data[offset++] = (len >> 24) & 0xff;
        data[offset++] = (len >> 16) & 0xff;
        data[offset++] = (len >> 8) & 0xff;
        data[offset++] = len & 0xff;
    } else {
        data = new Buffer(len + 5);
        data[offset++] = 0xF0;
        data[offset++] = (len >> 24) & 0xff;
        data[offset++] = (len >> 16) & 0xff;
        data[offset++] = (len >> 8) & 0xff;
        data[offset++] = len & 0xff;
    }
    data.utf8Write(s, offset);
    return data;
};
Maybe someone sees the difference
Thanks for the JavaScript translation. It clearly shows the problem, since the Swift version does not resemble it.
Let's take this stretch of the JavaScript, as it is the part you are stumbling over in Swift:
} else if (len < 0x4000) {
    data = new Buffer(len + 2);
    len |= 0x8000;
    data[offset++] = (len >> 8) & 0xff;
    data[offset++] = len & 0xff;
}
That is "translated" in Swift like this:
} else if len < 0x4000 {
    len = len | 0x8000;
    dat.append(Data(bytes: [UInt8(len >> 8)]))
    dat.append(Data(bytes: [UInt8(len)]))
}
Well, you can see at once that they are not at all the same. In the last line, the Swift version has forgotten the & 0xff.
If you put that in, everything starts working. And we can make it look a lot more like the JavaScript original too:
} else if len < 0x4000 {
    len |= 0x8000;
    dat.append(Data(bytes: [UInt8(len >> 8)]))
    dat.append(Data(bytes: [UInt8(len & 0xff)]))
}
So I'd say, yes, use the JavaScript as a guide and you'll be fine. If that last line doesn't feel "swifty" enough to you, then write it like this:
dat.append(Data(bytes: [UInt8(truncatingIfNeeded: len)]))
It's exactly the same result.
I don't guarantee that everything will work perfectly after you make those changes (the Swift code you showed still does not look to me like it does the same thing as the JavaScript), but at least the part where we write the length bytes into the start of the Data will work correctly.
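For reference, here is a sketch of what the whole function could look like with the masking applied in every branch, and with the third threshold changed to 0x200000 to match the JavaScript (your Swift version has 0x20000 there). It is untested, so treat it as a guide rather than a drop-in replacement:
private func writeLen(_ command: String) -> Data {
    // length of the UTF-8 payload, as in the original code
    let len = command.data(using: .utf8)?.count ?? 0
    var dat = Data()
    if len < 0x80 {
        dat.append(UInt8(len))
    } else if len < 0x4000 {
        let v = len | 0x8000
        dat.append(contentsOf: [UInt8((v >> 8) & 0xff), UInt8(v & 0xff)])
    } else if len < 0x200000 {
        let v = len | 0xC00000
        dat.append(contentsOf: [UInt8((v >> 16) & 0xff), UInt8((v >> 8) & 0xff), UInt8(v & 0xff)])
    } else if len < 0x10000000 {
        let v = len | 0xE0000000
        dat.append(contentsOf: [UInt8((v >> 24) & 0xff), UInt8((v >> 16) & 0xff),
                                UInt8((v >> 8) & 0xff), UInt8(v & 0xff)])
    } else {
        dat.append(0xF0)
        dat.append(contentsOf: [UInt8((len >> 24) & 0xff), UInt8((len >> 16) & 0xff),
                                UInt8((len >> 8) & 0xff), UInt8(len & 0xff)])
    }
    return dat
}
The & 0xff masks are what keep every UInt8 initializer in range, which is exactly the part the original was missing.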
Related
I wanted to save a Nef polyhedron into an OFF file for visualizing it. As written in the CGAL Nef polyhedra user manual (see paragraphs 5.4 and 5.5), a Nef polyhedron can be converted to either a Polyhedron_3 or a Surface_mesh.
However, I noticed that when converting to those structures and then saving it into an OFF file, the results are different.
Here I report the code for a minimal example:
#include <list>
#include <iostream>
#include <fstream>
#include <CGAL/Exact_predicates_exact_constructions_kernel.h>
#include <CGAL/Polyhedron_3.h>
#include <CGAL/Nef_polyhedron_3.h>
#include <CGAL/IO/Nef_polyhedron_iostream_3.h>
#include <CGAL/Surface_mesh.h>
#include <CGAL/boost/graph/convert_nef_polyhedron_to_polygon_mesh.h>
typedef CGAL::Exact_predicates_exact_constructions_kernel Kernel;
typedef Kernel::Point_3 Point_3;
typedef CGAL::Surface_mesh<Point_3> Mesh;
typedef CGAL::Polyhedron_3<Kernel> Polyhedron_3;
typedef CGAL::Nef_polyhedron_3<Kernel> Nef_polyhedron;
typedef Kernel::Vector_3 Vector_3;
typedef Kernel::Aff_transformation_3 Aff_transformation_3;
int convertStlToOff(const char* inputFilename, const char* outputFilename)
{
//read 80 bytes and put in std::cerr
std::ifstream obj(inputFilename, std::ios::in | std::ios::binary);
for (int i = 0; i < 80; i++) {
boost::uint8_t c;
obj.read(reinterpret_cast<char*>(&c), sizeof(c));
std::cerr << c;
}
std::cerr << std::endl;
//read 4 bytes and initialize number of triangles
boost::uint32_t N32;
obj.read(reinterpret_cast<char*>(&N32), sizeof(N32));
unsigned int N = N32;
std::cerr << N << " triangles" << std::endl;
//reserve space for N faces
std::vector<Point_3> points;
std::map<Point_3, int> pmap;
typedef boost::tuple<int, int, int> Face;
std::vector<Face> faces;
faces.reserve(N);
//read all faces
int number_of_points = 0;
int number_of_snapped_points = 0;
for (int i = 0; i < N; i++)
{
//read face normal (it is ignored)
float normal[3];
obj.read(reinterpret_cast<char*>(&normal[0]), sizeof(normal[0]));
obj.read(reinterpret_cast<char*>(&normal[1]), sizeof(normal[1]));
obj.read(reinterpret_cast<char*>(&normal[2]), sizeof(normal[2]));
//read coordinates of all 3 points
int index[3];
for (int j = 0; j < 3; j++)
{
float x, y, z;
obj.read(reinterpret_cast<char*>(&x), sizeof(x));
obj.read(reinterpret_cast<char*>(&y), sizeof(y));
obj.read(reinterpret_cast<char*>(&z), sizeof(z));
Point_3 p(x, y, z);
if (pmap.find(p) == pmap.end())
{
// check brute force if there is a close point
bool found_close_point = false;
/*for (int k = 0; k < points.size(); k++)
{
if (sqrt(CGAL::squared_distance(p, points[k])) < 0.00001)
{
index[j] = k;
found_close_point = true;
number_of_snapped_points++;
}
}*/
if (!found_close_point)
{
points.push_back(p);
index[j] = number_of_points;
pmap[p] = number_of_points++;
}
}
else {
index[j] = pmap[p];
}
}
faces.push_back(boost::make_tuple(index[0], index[1], index[2]));
//read two additional bytes, and ignore them
char c;
obj.read(reinterpret_cast<char*>(&c), sizeof(c));
obj.read(reinterpret_cast<char*>(&c), sizeof(c));
}
std::cerr << number_of_snapped_points << " snapped points" << std::endl;
std::ofstream outputFile(outputFilename);
outputFile.precision(20);
outputFile << "OFF\n" << points.size() << " " << faces.size() << " 0" << std::endl;
for (int i = 0; i < points.size(); i++)
{
outputFile << points[i] << std::endl;
}
for (int i = 0; i < faces.size(); i++)
{
outputFile << "3 " << boost::get<0>(faces[i]) << " " << boost::get<1>(faces[i]) << " " << boost::get<2>(faces[i]) << std::endl;
}
return 0;
}
void fill_cube_1(Polyhedron_3 & poly)
{
std::string input =
"OFF\n\
8 12 0\n\
-1 -1 -1\n\
-1 1 -1\n\
1 1 -1\n\
1 -1 -1\n\
-1 -1 1\n\
-1 1 1\n\
1 1 1\n\
1 -1 1\n\
3 0 1 3\n\
3 3 1 2\n\
3 0 4 1\n\
3 1 4 5\n\
3 3 2 7\n\
3 7 2 6\n\
3 4 0 3\n\
3 7 4 3\n\
3 6 4 7\n\
3 6 5 4\n\
3 1 5 6\n\
3 2 1 6";
std::stringstream ss;
ss << input;
ss >> poly;
}
enum savingModality
{
SAVE_AS_POLYHEDRON_3 = 0,
SAVE_AS_SURFACE_MESH = 1,
};
int saveNefObjectInOffFile(Nef_polyhedron offObject, const char* filename, savingModality modality)
{
if (!offObject.is_simple())
{
printf("Object is not simple. Cannot convert to mesh or polyhedron\n");
return 1;
}
std::ofstream outStream;
outStream.open(filename);
if (modality == SAVE_AS_POLYHEDRON_3)
{
Polyhedron_3 outputPolyhedron;
offObject.convert_to_Polyhedron(outputPolyhedron);
outStream << outputPolyhedron;
}
else if (modality == SAVE_AS_SURFACE_MESH)
{
Mesh outputMesh;
CGAL::convert_nef_polyhedron_to_polygon_mesh(offObject, outputMesh);
outStream << outputMesh;
}
outStream.close();
return 0;
}
int main()
{
int ret;
//construct nef object #1
Polyhedron_3 cube1;
fill_cube_1(cube1);
Nef_polyhedron nefObject1(cube1);
//construct nef object #2
Nef_polyhedron nefObject2(cube1);
Aff_transformation_3 scale2(1, 0, 0,
0, 1, 0,
0, 0, 1,
2);
nefObject2.transform(scale2);
Aff_transformation_3 translation2(CGAL::TRANSLATION, Vector_3(-0.5, -0.5, -0.5));
nefObject2.transform(translation2);
//construct nef object #3
Nef_polyhedron nefObject3;
nefObject3 = nefObject1 - nefObject2;
//save results into .off file
ret = saveNefObjectInOffFile(nefObject3, "out1.off", SAVE_AS_POLYHEDRON_3);
ret = saveNefObjectInOffFile(nefObject3, "out2.off", SAVE_AS_SURFACE_MESH);
return 0;
}
Here are the screenshots of the visualization of the two files: saving as Polyhedron_3 and saving as Surface_mesh. As you can see, it seems as if some faces are missing.
My question is: "Why are the results visualized differently?"
The output to Polyhedron_3 is triangulated, while the output to Surface_mesh is not. I guess there is a bug in MeshLab when displaying non-convex faces.
Look at the doc and you'll see that there is a Boolean parameter to enable the triangulation or not.
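If I read the documentation of convert_nef_polyhedron_to_polygon_mesh correctly, that parameter is an optional third argument (it defaults to false), so a variant of the SAVE_AS_SURFACE_MESH branch above along these lines should give a triangulated Surface_mesh as well; worth double-checking against your CGAL version:
Mesh outputMesh;
// sketch: the extra 'true' asks for triangulation of all faces
CGAL::convert_nef_polyhedron_to_polygon_mesh(offObject, outputMesh, true);
outStream << outputMesh;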
I am working with the Cypress CY8CKIT-050 kit, a W25Q128FV flash chip, an SD card, and this TFT display: https://www.buydisplay.com/default/lcd-5-ssd1963-tft-module-touch-screen-display-800x480-mcu-arduino
I have interfaced the display, the flash chip (using SPIFFS), and the SD card (using FatFs) with the CY8CKIT-050.
However, loading a BMP image from the SD card is very slow: the image is currently 150*90 and it takes 635 ms to load.
How can I improve this?
I also want to write a BMP image from the SD card to the flash chip and then load the image from the flash chip to the display. How can I do that?
I am attaching the code for reference.
void DrawImage(UG_S16 x1, UG_S16 y1, UG_S16 x2, UG_S16 y2, const TCHAR* namefile)
{
FATFS fatFs;
FIL fileO;
uint16_t buffimg,buffimg1;
uint16_t buffer[1];
UINT br,br1;
FRESULT pt;
char read[50];
uint32_t bmpWidth = 0;
uint32_t bmpHeight = 0;
uint16_t bmpImageoffset;
uint16_t rowSize;
uint8_t rdBuf[60];
uint8_t header_buff[60];
uint16_t position = 0;
uint8_t numberOfPixels = 0;
uint32_t commpression = 0;
uint32_t imageSize = 0;
unsigned int byte_read;
uint16_t bmpSig;
uint8_t row_buff[rowSize]; // only read 1 row at a time
uint8_t cc,R,G,B;
uint16_t pixel;
uint16_t color;
uint32 i,j, count=0;
uint8_t pColorData[DISPLAY_WIDTH*3];
uint8_t pColorData_flash[DISPLAY_WIDTH*3];
uint8_t pColorData_flash1[DISPLAY_WIDTH*3];
uint32_t pos;
int row =0 ;
if((x1 < 0) ||(x1 >= DISPLAY_WIDTH) || (y1 < 0) || (y1 >= DISPLAY_HEIGHT)) return;
if((x2 < 0) ||(x2 >= DISPLAY_WIDTH) || (y2 < 0) || (y2 >= DISPLAY_HEIGHT)) return;
/* Mount drive */
pt = f_mount(&fatFs, "", 1);
if(pt != FR_OK) return;
/* open file */
pt = f_open(&fileO, namefile, FA_READ);
if(pt != FR_OK) return;
/* read bmp header */
pt = f_read(&fileO, header_buff, sizeof header_buff, &byte_read);
if(pt != FR_OK){
return;
}
//my_spiffs_mount();
//spiffs_file fd = SPIFFS_open(&fs, "bmpFile", SPIFFS_CREAT | SPIFFS_TRUNC | SPIFFS_RDWR, 0);
//if (SPIFFS_write(&fs, fd,header_buff, sizeof(header_buff)) < 0) sprintf(bufferOut,"wr errno %i\n", SPIFFS_errno(&fs));
//SPIFFS_close(&fs, fd);
//UG_PutString(1,200,bufferOut);
//fd = SPIFFS_open(&fs, "bmpFile", SPIFFS_RDWR, 0);
//if (SPIFFS_read(&fs, fd, rdBuf, sizeof(rdBuf)) < 0) sprintf(bufferOut,"rd errno %i\n", SPIFFS_errno(&fs));
//SPIFFS_close(&fs, fd);
//UG_PutString(1,220,bufferOut);
bmpSig = (header_buff[1] << 8) | header_buff [0];
sprintf(bufferOut,"Signature:\t0x%X\n", bmpSig);
UG_PutString(1,160,bufferOut);
if(bmpSig != 0x4D42){
UG_PutString(1,180,"not a bmp");
return; // not a bmp
}
bmpImageoffset = header_buff [7] * 256 + header_buff [6];
bmpHeight = (header_buff [25] << 24) | (header_buff [24] << 16) | (header_buff [23] << 8) | header_buff [22];
bmpWidth = (header_buff [21] << 24) | (header_buff [20] << 16) | (header_buff [19] << 8) | header_buff [18];
position = header_buff [10]; // starting point in file
numberOfPixels = header_buff [29] * 256 + header_buff [28];
commpression = (header_buff [30] << 24) | (header_buff [31] << 16) | (header_buff [32] << 8) | header_buff [33];
imageSize = (header_buff [37] << 24) | (header_buff [36] << 16) | (header_buff [35] << 8) | header_buff [34];
rowSize = ((numberOfPixels * bmpWidth + 31) /32 ) * 4;
sprintf(read ,"Offset:\t%d\n", bmpImageoffset);
UG_PutString(1,1,read);
sprintf(read ,"Height:\t%u\n", bmpHeight);
UG_PutString(1,20,read);
sprintf(read ,"Width:\t\t%u\n", bmpWidth);
UG_PutString(1,40,read);
sprintf(read ,"Row Size:\t%d\n", rowSize);
UG_PutString(1,60,read);
sprintf(read ,"Start Index:\t%d\n", position);
UG_PutString(1,80,read);
sprintf(read ,"# of Pixels:\t%d\n", numberOfPixels);
UG_PutString(1,100,read);
sprintf(read ,"Compression:\t%u\n", commpression);
UG_PutString(1,120,read);
sprintf(read ,"Image Size:\t%u\n", imageSize);
UG_PutString(1,140,read);
Display_WindowSet(x1, x2, y1, y2);
/* Write to LCD-GRAM */
Display_WriteCommand(0x2c);
/* get current millisecond count */
unsigned long nCurrentMillis = nmillis;
for(i=0;i<bmpHeight; i++)
{
for(j=0; j<rowSize; j++)
{
count++;
LED3_Write(1);
f_read(&fileO,pColorData+j,1,&byte_read);
//fd = SPIFFS_open(&fs, "bmpFile", SPIFFS_RDWR , 0);
//if (SPIFFS_write(&fs, fd,pColorData+j, 1) < 0) sprintf(bufferOut,"wr errno %i\n", SPIFFS_errno(&fs));
//SPIFFS_close(&fs, fd);
//fd = SPIFFS_open(&fs, "bmpFile", SPIFFS_RDWR, 0);
//if (SPIFFS_read(&fs, fd, pColorData_flash+j, 1) < 0) sprintf(bufferOut,"rd errno %i\n", SPIFFS_errno(&fs));
//SPIFFS_close(&fs, fd);
//sprintf(bufferOut," row %lu",j);
//UG_PutString(1,360,bufferOut);
//CyDelay(1);
LED3_Write(0);
}
for(j=0;j<bmpWidth;j++)
{
//fd = SPIFFS_open(&fs, "bmpFile", SPIFFS_RDWR, 0);
//if (SPIFFS_read(&fs, fd, pColorData_flash,rowSize) < 0) sprintf(bufferOut,"rd errno %i\n", SPIFFS_errno(&fs));
//SPIFFS_close(&fs, fd);
uint32_t k = j*3;
R = pColorData[k+2];
G = pColorData[k+1];
B = pColorData[k];
color = color565(R,G,B);
Display_WriteData (color);
//sprintf(bufferOut," width %lu",j);
//UG_PutString(1,380,bufferOut);
}
//sprintf(bufferOut," height %lu",i);
//UG_PutString(1,400,bufferOut);
}
unsigned long nTime = nmillis-nCurrentMillis;
sprintf(bufferOut,"time taken for load : %lu ms",nTime);
UG_PutString(1,320,bufferOut);
sprintf(bufferOut,"%lu",count);
UG_PutString(1,340,bufferOut);
f_close(&fileO);
}
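For reference, a sketch of reading one padded BMP row per f_read() call instead of one byte at a time; per-byte f_read() calls force FatFs through its whole call stack for every single byte, which is usually where most of the 635 ms goes. This is not tested on the kit, reuses rowSize, pColorData, color565 and Display_WriteData from the code above, and assumes rowSize fits in pColorData as in the original loop:
UINT bytesRead;
for (i = 0; i < bmpHeight; i++)
{
    /* one f_read per row lets FatFs transfer whole sectors at a time */
    if (f_read(&fileO, pColorData, rowSize, &bytesRead) != FR_OK || bytesRead != rowSize)
        break;   /* read error or short read: stop drawing */

    for (j = 0; j < bmpWidth; j++)
    {
        /* BMP stores pixels as BGR; convert to RGB565 as before */
        uint32_t k = j * 3;
        color = color565(pColorData[k + 2], pColorData[k + 1], pColorData[k]);
        Display_WriteData(color);
    }
}
The same batching idea would apply when copying the image to the W25Q128 over SPIFFS and reading it back: move whole rows (or larger chunks) per call rather than single bytes.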
A project I am working on uses Apache Shiro as a security framework. Passwords are SHA1 hashed (no salt, no iterations). Login is SSL secured. However, the remaining part of the application is not SSL secured. In this context (no SSL) there should be a form where a user can change the password.
Since it wouldn't be a good idea to transmit it plainly, it should be hashed on the client and then transmitted to the server. As the client is GWT (2.3) based, I am trying this library: http://code.google.com/p/gwt-crypto, which uses code from bouncycastle.
However, in many cases (not all) the hashes generated by both frameworks differ in 1-4(?) characters.
For instance "happa3" is hashed to
"fe7f3cffd8a5f0512a5f1120f1369f48cd6f47c2"
by both implementations, whereas just "happa" is hashed to
"fb3c3a741b4e07a87d9cb68f3db020d6fbfed00a"
by the Shiro implementation and to
"fb3c3a741b4e07a87d9cb63f3db020d6fbfed00a"
by the gwt-crypto implementation (23rd character differs).
I wonder whether there is a "correct"/standard SHA1 hashing and whether there is a bug in one of the libraries or maybe my usage of them is flawed.
One of my first thoughts was related to different encodings or strange conversions due to different transport mechanisms (RPC vs. Post). To my knowledge though (and what puzzles me most), SHA1 hashes should differ completely with a high probability if there is just a difference of a single bit. So different encodings shouldn't be the issue here.
I am using this code on the client (GWT) for hashing:
String hashed = toHex(createSHA1Hash("password"));
...
private String createSHA1Hash(String passwordString){
SHA1Digest sha1 = new SHA1Digest();
byte[] bytes;
byte[] result = new byte[sha1.getDigestSize()];
try {
bytes = passwordString.getBytes();
sha1.update(bytes, 0, bytes.length);
int val = sha1.doFinal(result, 0);
} catch (UnsupportedEncodingException e) {}
return new String(result);
}
public String toHex(String arg) {
return new BigInteger(1, arg.getBytes()).toString(16);
}
And this on the server (Shiro):
String hashed = new Sha1Hash("password").toHex()
which, as far as I can see, does something very similar behind the scenes (I had a quick look at the source code).
Did I miss something obvious here?
EDIT: It seems the GWT code does not run natively for some reason (i.e. it only works in development mode) and silently fails (it does compile, though). I have to find out why...
EDIT (2): "int val = sha1.doFinal(result, 0);" is the line that causes trouble, i.e. if it is present, the whole code does not run natively (as JS) but only in dev mode (with wrong results).
You could test this version:
public class SHA1 {
public static native String calcSHA1(String s) /*-{
//
// A JavaScript implementation of the Secure Hash Algorithm, SHA-1, as defined
// in FIPS 180-1
// Version 2.2 Copyright Paul Johnston 2000 - 2009.
// Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
// Distributed under the BSD License
// See http://pajhome.org.uk/crypt/md5 for details.
//
//
// Configurable variables. You may need to tweak these to be compatible with
// the server-side, but the defaults work in most cases.
//
var hexcase = 0; // hex output format. 0 - lowercase; 1 - uppercase
var b64pad = ""; // base-64 pad character. "=" for strict RFC compliance
//
// These are the functions you'll usually want to call
// They take string arguments and return either hex or base-64 encoded strings
//
function b64_sha1(s) { return rstr2b64(rstr_sha1(str2rstr_utf8(s))); }
function any_sha1(s, e) { return rstr2any(rstr_sha1(str2rstr_utf8(s)), e); }
function hex_hmac_sha1(k, d)
{ return rstr2hex(rstr_hmac_sha1(str2rstr_utf8(k), str2rstr_utf8(d))); }
function b64_hmac_sha1(k, d)
{ return rstr2b64(rstr_hmac_sha1(str2rstr_utf8(k), str2rstr_utf8(d))); }
function any_hmac_sha1(k, d, e)
{ return rstr2any(rstr_hmac_sha1(str2rstr_utf8(k), str2rstr_utf8(d)), e); }
//
// Perform a simple self-test to see if the VM is working
//
function sha1_vm_test()
{
return hex_sha1("abc").toLowerCase() == "a9993e364706816aba3e25717850c26c9cd0d89d";
}
//
// Calculate the SHA1 of a raw string
//
function rstr_sha1(s)
{
return binb2rstr(binb_sha1(rstr2binb(s), s.length * 8));
}
//
// Calculate the HMAC-SHA1 of a key and some data (raw strings)
//
function rstr_hmac_sha1(key, data)
{
var bkey = rstr2binb(key);
if(bkey.length > 16) bkey = binb_sha1(bkey, key.length * 8);
var ipad = Array(16), opad = Array(16);
for(var i = 0; i < 16; i++)
{
ipad[i] = bkey[i] ^ 0x36363636;
opad[i] = bkey[i] ^ 0x5C5C5C5C;
}
var hash = binb_sha1(ipad.concat(rstr2binb(data)), 512 + data.length * 8);
return binb2rstr(binb_sha1(opad.concat(hash), 512 + 160));
}
//
// Convert a raw string to a hex string
//
function rstr2hex(input)
{
try { hexcase } catch(e) { hexcase=0; }
var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef";
var output = "";
var x;
for(var i = 0; i < input.length; i++)
{
x = input.charCodeAt(i);
output += hex_tab.charAt((x >>> 4) & 0x0F)
+ hex_tab.charAt( x & 0x0F);
}
return output;
}
//
// Convert a raw string to a base-64 string
//
function rstr2b64(input)
{
try { b64pad } catch(e) { b64pad=''; }
var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var output = "";
var len = input.length;
for(var i = 0; i < len; i += 3)
{
var triplet = (input.charCodeAt(i) << 16)
| (i + 1 < len ? input.charCodeAt(i+1) << 8 : 0)
| (i + 2 < len ? input.charCodeAt(i+2) : 0);
for(var j = 0; j < 4; j++)
{
if(i * 8 + j * 6 > input.length * 8) output += b64pad;
else output += tab.charAt((triplet >>> 6*(3-j)) & 0x3F);
}
}
return output;
}
//
// Convert a raw string to an arbitrary string encoding
//
function rstr2any(input, encoding)
{
var divisor = encoding.length;
var remainders = Array();
var i, q, x, quotient;
// Convert to an array of 16-bit big-endian values, forming the dividend
var dividend = Array(Math.ceil(input.length / 2));
for(i = 0; i < dividend.length; i++)
{
dividend[i] = (input.charCodeAt(i * 2) << 8) | input.charCodeAt(i * 2 + 1);
}
//
// Repeatedly perform a long division. The binary array forms the dividend,
// the length of the encoding is the divisor. Once computed, the quotient
// forms the dividend for the next step. We stop when the dividend is zero.
// All remainders are stored for later use.
//
while(dividend.length > 0)
{
quotient = Array();
x = 0;
for(i = 0; i < dividend.length; i++)
{
x = (x << 16) + dividend[i];
q = Math.floor(x / divisor);
x -= q * divisor;
if(quotient.length > 0 || q > 0)
quotient[quotient.length] = q;
}
remainders[remainders.length] = x;
dividend = quotient;
}
// Convert the remainders to the output string
var output = "";
for(i = remainders.length - 1; i >= 0; i--)
output += encoding.charAt(remainders[i]);
// Append leading zero equivalents
var full_length = Math.ceil(input.length * 8 /
(Math.log(encoding.length) / Math.log(2)))
for(i = output.length; i < full_length; i++)
output = encoding[0] + output;
return output;
}
//
// Encode a string as utf-8.
// For efficiency, this assumes the input is valid utf-16.
//
function str2rstr_utf8(input)
{
var output = "";
var i = -1;
var x, y;
while(++i < input.length)
{
// Decode utf-16 surrogate pairs
x = input.charCodeAt(i);
y = i + 1 < input.length ? input.charCodeAt(i + 1) : 0;
if(0xD800 <= x && x <= 0xDBFF && 0xDC00 <= y && y <= 0xDFFF)
{
x = 0x10000 + ((x & 0x03FF) << 10) + (y & 0x03FF);
i++;
}
// Encode output as utf-8
if(x <= 0x7F)
output += String.fromCharCode(x);
else if(x <= 0x7FF)
output += String.fromCharCode(0xC0 | ((x >>> 6 ) & 0x1F),
0x80 | ( x & 0x3F));
else if(x <= 0xFFFF)
output += String.fromCharCode(0xE0 | ((x >>> 12) & 0x0F),
0x80 | ((x >>> 6 ) & 0x3F),
0x80 | ( x & 0x3F));
else if(x <= 0x1FFFFF)
output += String.fromCharCode(0xF0 | ((x >>> 18) & 0x07),
0x80 | ((x >>> 12) & 0x3F),
0x80 | ((x >>> 6 ) & 0x3F),
0x80 | ( x & 0x3F));
}
return output;
}
//
// Encode a string as utf-16
//
function str2rstr_utf16le(input)
{
var output = "";
for(var i = 0; i < input.length; i++)
output += String.fromCharCode( input.charCodeAt(i) & 0xFF,
(input.charCodeAt(i) >>> 8) & 0xFF);
return output;
}
function str2rstr_utf16be(input)
{
var output = "";
for(var i = 0; i < input.length; i++)
output += String.fromCharCode((input.charCodeAt(i) >>> 8) & 0xFF,
input.charCodeAt(i) & 0xFF);
return output;
}
//
// Convert a raw string to an array of big-endian words
// Characters >255 have their high-byte silently ignored.
//
function rstr2binb(input)
{
var output = Array(input.length >> 2);
for(var i = 0; i < output.length; i++)
output[i] = 0;
for(var i = 0; i < input.length * 8; i += 8)
output[i>>5] |= (input.charCodeAt(i / 8) & 0xFF) << (24 - i % 32);
return output;
}
//
// Convert an array of big-endian words to a string
//
function binb2rstr(input)
{
var output = "";
for(var i = 0; i < input.length * 32; i += 8)
output += String.fromCharCode((input[i>>5] >>> (24 - i % 32)) & 0xFF);
return output;
}
//
// Calculate the SHA-1 of an array of big-endian words, and a bit length
//
function binb_sha1(x, len)
{
// append padding
x[len >> 5] |= 0x80 << (24 - len % 32);
x[((len + 64 >> 9) << 4) + 15] = len;
var w = Array(80);
var a = 1732584193;
var b = -271733879;
var c = -1732584194;
var d = 271733878;
var e = -1009589776;
for(var i = 0; i < x.length; i += 16)
{
var olda = a;
var oldb = b;
var oldc = c;
var oldd = d;
var olde = e;
for(var j = 0; j < 80; j++)
{
if(j < 16) w[j] = x[i + j];
else w[j] = bit_rol(w[j-3] ^ w[j-8] ^ w[j-14] ^ w[j-16], 1);
var t = safe_add(safe_add(bit_rol(a, 5), sha1_ft(j, b, c, d)),
safe_add(safe_add(e, w[j]), sha1_kt(j)));
e = d;
d = c;
c = bit_rol(b, 30);
b = a;
a = t;
}
a = safe_add(a, olda);
b = safe_add(b, oldb);
c = safe_add(c, oldc);
d = safe_add(d, oldd);
e = safe_add(e, olde);
}
return Array(a, b, c, d, e);
}
//
// Perform the appropriate triplet combination function for the current
// iteration
//
function sha1_ft(t, b, c, d)
{
if(t < 20) return (b & c) | ((~b) & d);
if(t < 40) return b ^ c ^ d;
if(t < 60) return (b & c) | (b & d) | (c & d);
return b ^ c ^ d;
}
//
// Determine the appropriate additive constant for the current iteration
//
function sha1_kt(t)
{
return (t < 20) ? 1518500249 : (t < 40) ? 1859775393 :
(t < 60) ? -1894007588 : -899497514;
}
//
// Add integers, wrapping at 2^32. This uses 16-bit operations internally
// to work around bugs in some JS interpreters.
//
function safe_add(x, y)
{
var lsw = (x & 0xFFFF) + (y & 0xFFFF);
var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
return (msw << 16) | (lsw & 0xFFFF);
}
//
// Bitwise rotate a 32-bit number to the left.
//
function bit_rol(num, cnt)
{
return (num << cnt) | (num >>> (32 - cnt));
}
return rstr2hex(rstr_sha1(str2rstr_utf8(s)));
}-*/;
}
I'm using it for my client-side SHA generation and it works well.
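One more thing worth noting, independent of the JSNI route: the original toHex(createSHA1Hash(...)) pipeline round-trips the raw digest through new String(result) and getBytes(), which is not guaranteed to be lossless, and BigInteger.toString(16) also drops leading zero digits. A hex routine that works on the digest bytes directly avoids both problems; a sketch (plain Java, not verified under GWT compilation):
private static final char[] HEX_DIGITS = "0123456789abcdef".toCharArray();

// Hex-encode the bytes returned by SHA1Digest.doFinal() directly,
// without ever converting them to a String first.
private static String toHex(byte[] digest) {
    StringBuilder sb = new StringBuilder(digest.length * 2);
    for (byte b : digest) {
        sb.append(HEX_DIGITS[(b >> 4) & 0xF]).append(HEX_DIGITS[b & 0xF]);
    }
    return sb.toString();
}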
I get a KERN_PROTECTION_FAILURE somewhere (the stack trace shows it happening in the main loop, but it won't give me more details because it seems that memory got corrupted in a previous loop; I have all the settings needed to see debug output correctly).
When I remove the call to the following code, the symptom goes away.
(It verifies the receipt for an in-app purchase.)
- (NSString *)encode:(const uint8_t *)input length:(NSInteger)length {
static char table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
NSMutableData *data = [NSMutableData dataWithLength:((length + 2) / 3) * 4];
uint8_t *output = (uint8_t *)data.mutableBytes;
for (NSInteger i = 0; i < length; i += 3) {
NSInteger value = 0;
for (NSInteger j = i; j < (i + 3); j++) {
value <<= 8;
if (j < length) {
value |= (0xFF & input[j]);
}
}
NSInteger index = (i / 3) * 4;
output[index + 0] = table[(value >> 18) & 0x3F];
output[index + 1] = table[(value >> 12) & 0x3F];
output[index + 2] = (i + 1) < length ? table[(value >> 6) & 0x3F] : '=';
output[index + 3] = (i + 2) < length ? table[(value >> 0) & 0x3F] : '=';
}
return [[[NSString alloc] initWithData:data encoding:NSASCIIStringEncoding] autorelease];
}
I see there are other ways of getting Base64 encoding, e.g. How do I do base64 encoding on iphone-sdk?
What I find weird is that the length is computed differently:
((length + 2) / 3) * 4 above, and lentext*4/3+4 below.
Can anyone tell what is going on?
Besides, using the code below, I get a 'receipt data-malformed' error when I pass the encoded data to the Apple server.
+ (NSString *) base64StringFromData: (NSData *)data length: (int)length {
int lentext = [data length];
if (lentext < 1) return @"";
char *outbuf = malloc(lentext*4/3+4); // add 4 to be sure
if ( !outbuf ) return nil;
const unsigned char *raw = [data bytes];
int inp = 0;
int outp = 0;
int do_now = lentext - (lentext%3);
for ( outp = 0, inp = 0; inp < do_now; inp += 3 )
{
outbuf[outp++] = base64EncodingTable[(raw[inp] & 0xFC) >> 2];
outbuf[outp++] = base64EncodingTable[((raw[inp] & 0x03) << 4) | ((raw[inp+1] & 0xF0) >> 4)];
outbuf[outp++] = base64EncodingTable[((raw[inp+1] & 0x0F) << 2) | ((raw[inp+2] & 0xC0) >> 6)];
outbuf[outp++] = base64EncodingTable[raw[inp+2] & 0x3F];
}
if ( do_now < lentext )
{
char tmpbuf[2] = {0,0};
int left = lentext%3;
for ( int i=0; i < left; i++ )
{
tmpbuf[i] = raw[do_now+i];
}
raw = tmpbuf;
outbuf[outp++] = base64EncodingTable[(raw[inp] & 0xFC) >> 2];
outbuf[outp++] = base64EncodingTable[((raw[inp] & 0x03) << 4) | ((raw[inp+1] & 0xF0) >> 4)];
if ( left == 2 ) outbuf[outp++] = base64EncodingTable[((raw[inp+1] & 0x0F) << 2) | ((raw[inp+2] & 0xC0) >> 6)];
}
NSString *ret = [[[NSString alloc] initWithBytes:outbuf length:outp encoding:NSASCIIStringEncoding] autorelease];
free(outbuf);
return ret;
}
I'm using this very small library for encoding/decoding Base64: http://imthi.com/blog/programming/iphone-sdk-base64-encode-decode.php
It does its job, and I assume you use a similar version of it. Did you try to call +initialize before the first usage?
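As for the two buffer sizes from the question: both expressions just reserve room for the 4*ceil(n/3) characters that Base64 produces for n input bytes; the second one rounds down in integer arithmetic and then adds 4 as slack, so it merely over-allocates a little. For example:
// n = 10 input bytes -> ceil(10/3) = 4 groups -> 16 output characters
// ((10 + 2) / 3) * 4  == 16    exact
//  10 * 4 / 3 + 4     == 17    13 + 4, one byte of harmless slack
// n = 12 input bytes -> 4 groups -> 16 output characters
// ((12 + 2) / 3) * 4  == 16
//  12 * 4 / 3 + 4     == 20    over-allocates by 4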
I'm familiar with WideCharToMultiByte and MultiByteToWideChar conversions and could use these to do something like:
UTF8 -> UTF16 -> 1252
I know that iconv will do what I need, but does anybody know of any MS libs that will allow this in a single call?
I should probably just pull in the iconv library, but am feeling lazy.
Thanks
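For reference, the two-step version I have in mind looks roughly like this (an untested sketch with minimal error handling; it assumes the input fits in a fixed wide buffer, real code would query the required sizes first):
#include <windows.h>

/* UTF-8 -> UTF-16 -> Windows-1252 with the two calls mentioned above.
 * Returns the number of bytes written to dst (excluding the NUL), or 0
 * on failure. Characters with no 1252 mapping become the system default
 * character (normally '?'). */
int utf8_to_cp1252(const char *utf8, char *dst, int dst_size)
{
    wchar_t wide[1024];
    int wlen = MultiByteToWideChar(CP_UTF8, 0, utf8, -1, wide, 1024);
    if (wlen == 0)
        return 0;
    int alen = WideCharToMultiByte(1252, 0, wide, -1, dst, dst_size, NULL, NULL);
    return alen > 0 ? alen - 1 : 0;
}
Passing -1 as the source length makes both calls include the terminating NUL in their counts, which is why one is subtracted at the end.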
Windows 1252 is mostly equivalent to latin-1, aka ISO-8859-1: Windows-1252 just has some additional characters allocated in the latin-1 reserved range 128-159. If you are ready to ignore those extra characters, and stick to latin-1, then conversion is rather easy. Try this:
#include <stddef.h>
/*
* Convert from UTF-8 to latin-1. Invalid encodings, and encodings of
* code points beyond 255, are replaced by question marks. No more than
* dst_max_len bytes are stored in the destination array. Returned value
* is the length that the latin-1 string would have had, assuming a big
* enough destination buffer.
*/
size_t
utf8_to_latin1(char *src, size_t src_len,
char *dst, size_t dst_max_len)
{
unsigned char *sb;
size_t u, v;
u = v = 0;
sb = (unsigned char *)src;
while (u < src_len) {
int c = sb[u ++];
if (c >= 0x80) {
if (c >= 0xC0 && c < 0xE0) {
if (u == src_len) {
c = '?';
} else {
int w = sb[u];
if (w >= 0x80 && w < 0xC0) {
u ++;
c = ((c & 0x1F) << 6)
+ (w & 0x3F);
} else {
c = '?';
}
}
} else {
int i;
for (i = 6; i >= 0; i --)
if (!(c & (1 << i)))
break;
c = '?';
u += i;
}
}
if (v < dst_max_len)
dst[v] = (char)c;
v ++;
}
return v;
}
/*
* Convert from latin-1 to UTF-8. No more than dst_max_len bytes are
* stored in the destination array. Returned value is the length that
* the UTF-8 string would have had, assuming a big enough destination
* buffer.
*/
size_t
latin1_to_utf8(char *src, size_t src_len,
char *dst, size_t dst_max_len)
{
unsigned char *sb;
size_t u, v;
u = v = 0;
sb = (unsigned char *)src;
while (u < src_len) {
int c = sb[u ++];
if (c < 0x80) {
if (v < dst_max_len)
dst[v] = (char)c;
v ++;
} else {
int h = 0xC0 + (c >> 6);
int l = 0x80 + (c & 0x3F);
if (v < dst_max_len) {
dst[v] = (char)h;
if ((v + 1) < dst_max_len)
dst[v + 1] = (char)l;
}
v += 2;
}
}
return v;
}
Note that I make no guarantee about this code. This is completely untested.
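In the same untested spirit, here is a small usage sketch showing the calling convention (source length in, writes capped at the destination size, full logical length returned):
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* "déjà vu" encoded as UTF-8: é is 0xC3 0xA9, à is 0xC3 0xA0 */
    char utf8[] = "d\xC3\xA9j\xC3\xA0 vu";
    char latin1[64];
    char back[64];

    size_t n = utf8_to_latin1(utf8, strlen(utf8), latin1, sizeof latin1);
    printf("latin-1 length: %u\n", (unsigned)n);            /* expected: 7 */

    size_t m = latin1_to_utf8(latin1, n, back, sizeof back);
    printf("round-trip utf-8 length: %u\n", (unsigned)m);   /* expected: 9 */
    return 0;
}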