NSInteger problem - iPhone

How do I use NSInteger? Please give an example with some simple code.

NSInteger is defined like this:
#if __LP64__ || NS_BUILD_32_LIKE_64
typedef long NSInteger;
typedef unsigned long NSUInteger;
#else
typedef int NSInteger;
typedef unsigned int NSUInteger;
#endif
It's the same as int on 32-bit or long on 64-bit.

NSInteger a, b, c;
a = 2;
b = 40;
c = a + b;
NSLog(@"The answer is: %ld", (long)c);
(When logging an NSInteger, cast it to long and use %ld so the format specifier is correct on both 32-bit and 64-bit builds.)


CUDA SHA-256 produces a different hash compared to OpenSSL

I am trying to port my SHA-256 hash function from CPU code to CUDA. After googling, I found a few working examples of CUDA SHA-256. However, when tested, the hash produced by the CUDA SHA-256 differs from OpenSSL's.
My input is "hello world", declared as a const char*. The results are below:
Constant Char* Input : hello world
Hash on CPU : b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9
Hash on GPU : c1114db6b517b4db8d360a9e14f5c2a57de95d955ec20cbd4cb73facb2b13e5f
I need help fixing my GPU code for SHA-256 so that it produces the same hash as the CPU (OpenSSL) version.
Here is my code for the CPU hash:
#pragma warning(disable : 4996) // silence MSVC's deprecation warning for the legacy SHA256_* API
#include <cstdio>
#include <cstring>
#include <iostream>
#include <openssl/sha.h>
unsigned char hash[SHA256_DIGEST_LENGTH];
void SHA256(const char* input, size_t input_size) {
    SHA256_CTX sha256;
    SHA256_Init(&sha256);
    SHA256_Update(&sha256, input, input_size);
    SHA256_Final(hash, &sha256);
}
void CPU() {
    const char* input = "hello world";
    size_t input_size = strlen(input);
    SHA256(input, input_size);
    for (size_t i = 0; i < SHA256_DIGEST_LENGTH; i++) {
        printf("%02x", hash[i]);
    }
    printf("\n");
}
And here is my code for the GPU hash:
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define BLOCK_SIZE 256
__constant__ unsigned int k[64] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
__device__ unsigned int Ch(unsigned int x, unsigned int y, unsigned int z) {
    return (x & y) ^ (~x & z);
}
__device__ unsigned int Maj(unsigned int x, unsigned int y, unsigned int z) {
    return (x & y) ^ (x & z) ^ (y & z);
}
__device__ unsigned int Sigma0(unsigned int x) {
    return (x >> 2u) | (x << 30u);
}
__device__ unsigned int Sigma1(unsigned int x) {
    return (x >> 6u) | (x << 26u);
}
__device__ unsigned int sigma0(unsigned int x) {
    return (x >> 7u) | (x << 25u);
}
__device__ unsigned int sigma1(unsigned int x) {
    return (x >> 17u) | (x << 15u);
}
// solve using 256 threads in 1 block
__global__ void sha256_kernel(const char* input, size_t input_size, unsigned char* output) {
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    size_t grid_size = blockDim.x * gridDim.x;
    for (; i < input_size; i += grid_size) {
        unsigned int h[8] = {
            0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
        };
        unsigned int w[64];
        for (size_t j = 0; j < input_size; j += 64) {
            for (size_t t = 0; t < 16; t++) {
                w[t] = ((unsigned int)input[j + t * 4 + 0] << 24u) | ((unsigned int)input[j + t * 4 + 1] << 16u) |
                       ((unsigned int)input[j + t * 4 + 2] << 8u) | ((unsigned int)input[j + t * 4 + 3] << 0u);
            }
            for (size_t t = 16; t < 64; t++) {
                w[t] = sigma1(w[t - 2]) + w[t - 7] + sigma0(w[t - 15]) + w[t - 16];
            }
            unsigned int a = h[0];
            unsigned int b = h[1];
            unsigned int c = h[2];
            unsigned int d = h[3];
            unsigned int e = h[4];
            unsigned int f = h[5];
            unsigned int g = h[6];
            unsigned int hh = h[7];
            for (size_t t = 0; t < 64; t++) {
                unsigned int t1 = hh + Sigma1(e) + Ch(e, f, g) + k[t] + w[t];
                unsigned int t2 = Sigma0(a) + Maj(a, b, c);
                hh = g;
                g = f;
                f = e;
                e = d + t1;
                d = c;
                c = b;
                b = a;
                a = t1 + t2;
            }
            h[0] += a;
            h[1] += b;
            h[2] += c;
            h[3] += d;
            h[4] += e;
            h[5] += f;
            h[6] += g;
            h[7] += hh;
        }
        for (size_t t = 0; t < 8; t++) {
            output[i + t * 4 + 0] = (unsigned char)(h[t] >> 24u);
            output[i + t * 4 + 1] = (unsigned char)(h[t] >> 16u);
            output[i + t * 4 + 2] = (unsigned char)(h[t] >> 8u);
            output[i + t * 4 + 3] = (unsigned char)(h[t] >> 0u);
        }
    }
}
void GPU() {
    const char* input = "hello world";
    size_t input_size = strlen(input);
    size_t output_size = 32;
    unsigned char* output;
    char* input_device;
    cudaMalloc((void**)&output, output_size);
    cudaMalloc((void**)&input_device, input_size);
    cudaMemcpy(input_device, input, input_size, cudaMemcpyHostToDevice);
    // solve using 256 threads and 1 block
    sha256_kernel<<<((input_size + BLOCK_SIZE - 1) / BLOCK_SIZE), BLOCK_SIZE>>>(input_device, input_size, output);
    unsigned char* output_host = (unsigned char*)malloc(output_size);
    cudaMemcpy(output_host, output, output_size, cudaMemcpyDeviceToHost);
    for (size_t i = 0; i < output_size; i++) {
        printf("%02x", output_host[i]);
    }
    printf("\n");
    free(output_host);
    cudaFree(output);
    cudaFree(input_device);
}
Thanks in advance.
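Two things stand out when comparing the kernel against the SHA-256 specification (FIPS 180-4). First, each of the Sigma0/Sigma1/sigma0/sigma1 helpers above computes only a single rotation, while the spec XORs three rotated or shifted terms. Second, the kernel hashes the raw input without padding; SHA-256 requires appending a 0x80 byte, zero-filling, and writing the 64-bit message bit length into the last 8 bytes of the final 64-byte block. A minimal, untested sketch of spec-conformant helpers (the rotr name is my own):
__device__ unsigned int rotr(unsigned int x, unsigned int n) {
    // 32-bit right rotation
    return (x >> n) | (x << (32u - n));
}
// FIPS 180-4, section 4.1.2
__device__ unsigned int Sigma0(unsigned int x) { return rotr(x, 2) ^ rotr(x, 13) ^ rotr(x, 22); }
__device__ unsigned int Sigma1(unsigned int x) { return rotr(x, 6) ^ rotr(x, 11) ^ rotr(x, 25); }
__device__ unsigned int sigma0(unsigned int x) { return rotr(x, 7) ^ rotr(x, 18) ^ (x >> 3u); }
__device__ unsigned int sigma1(unsigned int x) { return rotr(x, 17) ^ rotr(x, 19) ^ (x >> 10u); }
For "hello world" (11 bytes) the padding yields a single 64-byte block: the 11 message bytes, then 0x80, then zeros, with the value 88 (11 * 8 bits) in the last 8 bytes, big-endian.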

Cairo: Draw an array of pixels

I'm just starting to explore Cairo, but right now I really want to use it for something very simple.
I have a very low-tech bitmap, i.e., a 3*X*Y array of numbers. I'd like to use Cairo to make this into a bitmap and write to a file. I'm looking through tutorials and I'm not seeing a way to use it for comparatively low-level functions like this.
I don't think I need guidance on how to use the tool once I know what the tool is.
I didn't actually test this, but the following should give you lots of useful hints:
#include <cairo.h>
#include <stdint.h>
#define WIDTH 42
#define HEIGHT 42
uint8_t data[WIDTH][HEIGHT][3];
cairo_surface_t* convert()
{
    cairo_surface_t *result;
    unsigned char *current_row;
    int stride;
    result = cairo_image_surface_create(CAIRO_FORMAT_RGB24, WIDTH, HEIGHT);
    if (cairo_surface_status(result) != CAIRO_STATUS_SUCCESS)
        return result;
    cairo_surface_flush(result);
    current_row = cairo_image_surface_get_data(result);
    stride = cairo_image_surface_get_stride(result);
    for (int y = 0; y < HEIGHT; y++) {
        uint32_t *row = (void *) current_row;
        for (int x = 0; x < WIDTH; x++) {
            uint32_t r = data[x][y][0];
            uint32_t g = data[x][y][1];
            uint32_t b = data[x][y][2];
            row[x] = (r << 16) | (g << 8) | b;
        }
        current_row += stride;
    }
    cairo_surface_mark_dirty(result);
    return result;
}
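To get the pixels into a file, cairo's built-in PNG writer can then be applied to the returned surface (assuming cairo was built with PNG support); the file name here is only an example:
/* Example usage of convert() above: write the surface out as a PNG. */
int main(void)
{
    cairo_surface_t *surface = convert();
    if (cairo_surface_status(surface) == CAIRO_STATUS_SUCCESS)
        cairo_surface_write_to_png(surface, "out.png"); /* "out.png" is an example name */
    cairo_surface_destroy(surface);
    return 0;
}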

Reading a structured binary .dat file

I'm unable to read a binary .dat file that has the following structure:
typedef struct {
    unsigned char message_code;
    unsigned char code;
    unsigned char sequence;
    unsigned char reserve_1;
    float ladle_weight;
    float tundish_weight;
    unsigned char reserve_2;
    unsigned char no_of_measurements;
    unsigned int measurement_1;
    unsigned int measurement_2;
    unsigned int measurement_3;
    unsigned int measurement_4;
    unsigned int measurement_5;
    unsigned int measurement_6;
    unsigned int measurement_7;
    unsigned int measurement_8;
} message_metal;
typedef struct {
    unsigned char message_code;
    unsigned char strand_no;
    unsigned char steel_code;
    unsigned char container_state;
    float water_pr_at_mould;
    float water_con_at_mould;
    float temp_diff_of_water_at_mould;
    float water_pr_at_common_intake;
    float water_con_at_section_1;
    float water_con_at_section_2;
    float water_con_at_section_3;
    float water_con_at_section_4;
    float water_con_at_section_5;
    float water_pr_at_section_1;
    float water_pr_at_section_2;
    float water_pr_at_section_3;
    float water_pr_at_section_4;
    float water_pr_at_section_5;
    float withdrawl_speed;
    float metal_level_in_mould;
    float bloom_length;
} message_strand;
typedef struct {
    message_metal metal;
    message_strand strand[4];
    unsigned char hour;
    unsigned char min;
    unsigned char sec;
} machine;
The .dat files cannot be attached.
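One caveat worth spelling out before reaching for fread: this only round-trips if the file was written with the same struct layout the reader compiles with. Compilers insert alignment padding (for example, on common ABIs message_metal gets 2 padding bytes between no_of_measurements and measurement_1), so a file produced by different code may not match sizeof(machine). A minimal sketch under that same-layout assumption; the path and function name are illustrative:
#include <stdio.h>
/* Hypothetical reader: assumes the file holds one `machine` record
 * laid out exactly as this compiler lays out the struct. */
static int read_machine(const char *path, machine *out)
{
    FILE *f = fopen(path, "rb");
    if (f == NULL)
        return -1;
    size_t n = fread(out, sizeof *out, 1, f);
    fclose(f);
    return (n == 1) ? 0 : -1;
}
If the layouts differ, the fields have to be read one at a time (and byte-swapped if the writer's endianness differs).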

Unable to link external lib (CLAPACK) using MATLAB mex

I'm new to MEX and this problem has cost me days, but I still cannot figure out what to do.
I created a la_test.cpp file to test one of the subroutines in CLAPACK: cgemm_ (complex matrix-matrix multiplication). Here is the code:
#include "mex.h"
#include "matrix.h"
#include <string.h>
#include <stdlib.h>
#include <malloc.h>
#include <iostream>
#include <stdio.h>
#include "f2c.h"
#include "clapack.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    float *Xr, *Xi;   // To store the input data
    float *zsr, *zsi; // To store the output
    long int m, n;
    complex *A;
    Xr = (float *) mxGetPr(prhs[0]);
    Xi = (float *) mxGetPi(prhs[0]);
    size_t K = mxGetNumberOfDimensions(prhs[0]);
    const int *size = mxGetDimensions(prhs[0]);
    m = mxGetM(prhs[0]);
    n = mxGetN(prhs[0]);
    A = new complex[m*n];
    complex one = {1.0f, 0.0f}, zero = {0.0f, 0.0f};
    for (int i = 0; i < m; i++){
        for (int j = 0; j < n; j++){
            complex rc = {Xr[j + n*i], Xi[j + n*i]};
            A[j + n*i] = rc;
        }
    }
    plhs[0] = (mxArray *) mxCreateDoubleMatrix(n, n, mxCOMPLEX);
    zsr = (float *) mxGetPr(plhs[0]);
    zsi = (float *) mxGetPi(plhs[0]);
    complex *AtA = 0;
    AtA = new complex[n*n];
    char *chn = "N";
    char *chc = "C";
    cgemm_(chc, chn, &n, &n, &m, &one, A, &m, A, &m, &zero, AtA, &n);
    for (int i = 0; i < m; i++){
        for (int j = 0; j < n; j++){
            zsr[j + n*i] = AtA[j + n*i].r;
            zsi[j + n*i] = AtA[j + n*i].i;
        }
    }
}
Basically, I store the input matrix in A and try to compute A'*A. The header files f2c.h and clapack.h, as well as the three 64-bit libraries blas.lib, libf2c.lib and lapack.lib from http://icl.eecs.utk.edu/lapack-for-windows/clapack/index.html#install, are all in the same folder as la_test.cpp. I'm working on a Windows 7 64-bit system with MATLAB R2013a and Visual Studio 2012.
I have tried with both:
mex la_test.cpp lapack.lib libf2c.lib blas.lib
and
mex -llapack -llibf2c -lblas -L"C:\Users\Ziwu\Desktop\la_test" la_test.cpp
both failing with the following error:
Creating library C:\Users\Ziwu\AppData\Local\Temp\mex_epl230\templib.x and object C:\Users\Ziwu\AppData\Local\Temp\mex_epl230\templib.exp
la_test.obj : error LNK2019: unresolved external symbol cgemm_ referenced in function mexFunction
la_test.mexw64 : fatal error LNK1120: 1 unresolved externals
I've searched the Internet for a long time but have found no solution yet.
Please help me if you have any advice.
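One common cause of an unresolved cgemm_ when the source file is .cpp is C++ name mangling: if f2c.h and clapack.h do not wrap their declarations in extern "C" themselves, the compiler looks for a mangled C++ symbol rather than the plain cgemm_ the library exports. A sketch of the usual guard, assuming the headers lack it:
// In la_test.cpp, give the CLAPACK declarations C linkage:
extern "C" {
#include "f2c.h"
#include "clapack.h"
}
If the symbol still comes up unresolved, it is also worth double-checking that the .lib files are really 64-bit builds matching the mexw64 target.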

XOR between 2 NSStrings gives wrong result

I have this method to XOR two NSStrings. I'm printing the result with NSLog, but it isn't what I expect.
I can't figure out what I'm doing wrong.
- (void)XorSecretKeyDeviceId
{
    NSString* secretKey = @"123";
    NSString* deviceId = @"abcdef";
    NSData* stringKey = [secretKey dataUsingEncoding:NSUTF8StringEncoding];
    NSData* stringDeviceId = [deviceId dataUsingEncoding:NSUTF8StringEncoding];
    unsigned char* pBytesInput = (unsigned char*)[stringKey bytes]; // Bytes
    unsigned char* pBytesKey = (unsigned char*)[stringDeviceId bytes];
    unsigned int vlen = [secretKey length]; // Key lengths
    unsigned int klen = [deviceId length];
    unsigned int v;
    unsigned int k = vlen % klen;
    unsigned char c;
    for (v = 0; v < vlen; v++)
    {
        c = pBytesInput[v] ^ pBytesKey[k];
        pBytesInput[v] = c;
        NSLog(@"%c", c);
        k = (++k < klen ? k : 0);
    }
}
Are you setting your pBytesInput and pBytesKey variables correctly? At the moment, you have unsigned char* pBytesInput = (unsigned char*)[stringKey bytes]; (i.e. the input is the "key"), and pBytesKey is the device ID. This seems odd.
Also, be careful using UTF-8 encoding. UTF-8 uses the high bit on any byte in the string to indicate a "continuation" of a multi-byte character into the next byte. Your encryption could plausibly generate invalid UTF-8 by setting the high bit of the final byte.
For more than that, you'll have to say what the "wrong result" is.
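For comparison, conventional repeating-key XOR starts the key index at 0 and wraps it with a modulus, and it writes into a buffer it owns. The core loop is plain C (the function name is illustrative):
#include <stddef.h>
/* Hypothetical helper: XOR `buf` in place with a repeating `key`. */
static void xor_repeating_key(unsigned char *buf, size_t buf_len,
                              const unsigned char *key, size_t key_len)
{
    for (size_t i = 0; i < buf_len; i++)
        buf[i] ^= key[i % key_len]; /* key index starts at 0 and wraps */
}
In the Objective-C method above, that would mean starting k at 0 rather than vlen % klen, and copying the bytes into an NSMutableData (and using -mutableBytes) before writing, since the buffer returned by -[NSData bytes] is read-only.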