Microsoft SEAL: Subtraction of two PolyCRT-composed ciphertexts results in overflow

Suppose I have two arrays x = [1,2,3,4,5] and xMean = [3,3,3,3,3]. I composed and encrypted the two arrays using PolyCRTBuilder (xCiphertext and xMeanCiphertext). If I subtract the two ciphertexts (xCiphertext MINUS xMeanCiphertext), I should get xResult = [-2, -1, 0, 1, 2], but after the homomorphic subtraction I get xResultDecrypted = [40959, 40960, 0, 1, 2]. I can relate the overflowed result to the plain modulus I set, but is there a workaround for this problem? Here is the code:
int main() {
    EncryptionParameters parms;
    parms.set_poly_modulus("1x^4096 + 1");
    parms.set_coeff_modulus(coeff_modulus_128(4096));
    parms.set_plain_modulus(40961);
    SEALContext context(parms);
    KeyGenerator keygen(context);
    auto public_key = keygen.public_key();
    auto secret_key = keygen.secret_key();
    Encryptor encryptor(context, public_key);
    Evaluator evaluator(context);
    Decryptor decryptor(context, secret_key);
    PolyCRTBuilder crtbuilder(context);
    int slot_count = crtbuilder.slot_count();
    int row_size = slot_count / 2;
    vector<uint64_t> x_pod_matrix(slot_count, 0);
    x_pod_matrix[0] = 1;
    x_pod_matrix[1] = 2;
    x_pod_matrix[2] = 3;
    x_pod_matrix[3] = 4;
    x_pod_matrix[4] = 5;
    Plaintext x_plain_matrix;
    crtbuilder.compose(x_pod_matrix, x_plain_matrix);
    Ciphertext x_encrypted_matrix;
    encryptor.encrypt(x_plain_matrix, x_encrypted_matrix);
    vector<uint64_t> x_mean_pod_matrix(slot_count, 0);
    x_mean_pod_matrix[0] = 3;
    x_mean_pod_matrix[1] = 3;
    x_mean_pod_matrix[2] = 3;
    x_mean_pod_matrix[3] = 3;
    x_mean_pod_matrix[4] = 3;
    Plaintext x_mean_plain_matrix;
    crtbuilder.compose(x_mean_pod_matrix, x_mean_plain_matrix);
    Ciphertext x_mean_encrypted_matrix;
    encryptor.encrypt(x_mean_plain_matrix, x_mean_encrypted_matrix);
    // ciphertext-ciphertext subtraction, in place (sub_plain expects a Plaintext)
    evaluator.sub(x_encrypted_matrix, x_mean_encrypted_matrix);
    // Decrypt x_encrypted_matrix
    Plaintext x_plain_result;
    decryptor.decrypt(x_encrypted_matrix, x_plain_result);
    vector<uint64_t> pod_result;
    crtbuilder.decompose(x_plain_result, pod_result);
    for (int i = 0; i < 5; i++) {
        std::cout << pod_result[i] << '\n';
    }
    /*
    Expected output:
    -2
    -1
    0
    1
    2
    */
    /*
    Actual output:
    40959
    40960
    0
    1
    2
    */
    return 0;
}
evaluator.negate() isn't going to help solve my problem.

So, the result you get is correct because the plaintext is only defined modulo plain_modulus. There is an overload of PolyCRTBuilder::decompose that takes a vector of std::int64_t and will automatically do the conversion you are hoping to see.
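For example, a minimal sketch using that signed decompose overload (SEAL 2.x-era API as in the question, reusing the variables from the code above):
vector<int64_t> pod_result_signed;
// The std::int64_t overload maps slot values above plain_modulus/2 back to
// negative integers, so 40959 decodes as -2 and 40960 as -1.
crtbuilder.decompose(x_plain_result, pod_result_signed);
for (int i = 0; i < 5; i++) {
    std::cout << pod_result_signed[i] << '\n'; // -2 -1 0 1 2
}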

Related

Solving Twitter Puddle with Zipper

This is a follow-up to my previous question. Consider the following puzzle.
I would like to generate a waterLevel array, so that the i-th item is the water level at the i-th point, and then sum them up to solve the puzzle.
waterLevel[i] =
  max(0, min(max of left neighbors, max of right neighbors) - height[i])
I would probably try to code it with Zipper:
waterLevels = heights.toZipper.cobind { z =>
  max(0, min(max(z.left), max(z.right)) - z.focus)
}.toList
Does it make sense?
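For concreteness, here is a direct (non-Zipper) rendering of that formula, a C++ sketch of my own using prefix/suffix maxima in O(n):
#include <algorithm>
#include <iostream>
#include <vector>

// waterLevel[i] = max(0, min(max to the left, max to the right) - height[i])
int accumulatedWater(const std::vector<int>& height) {
    int n = static_cast<int>(height.size());
    if (n == 0) return 0;
    std::vector<int> left(n), right(n); // running maxima from each side
    left[0] = height[0];
    for (int i = 1; i < n; i++) left[i] = std::max(left[i - 1], height[i]);
    right[n - 1] = height[n - 1];
    for (int i = n - 2; i >= 0; i--) right[i] = std::max(right[i + 1], height[i]);
    int total = 0;
    for (int i = 0; i < n; i++)
        total += std::max(0, std::min(left[i], right[i]) - height[i]);
    return total;
}

int main() {
    std::cout << accumulatedWater({2, 5, 1, 2, 3, 4, 7, 7, 6}) << '\n'; // 10
}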
My solution is in Java; it comes with tests and the expected results:
package com.company;

import java.util.*;

enum Orientation {DOWN, UP}

class Walls {
    public static void main(String[] args) {
        HashMap<String, Integer> tests = new HashMap<String, Integer>();
        tests.put("2 5 1 2 3 4 7 7 6", 10);
        tests.put("2 2 5 1 3 1 2 1 7 7 6", 17);
        tests.put("2 7 2 7 4 7 1 7 3 7", 18);
        tests.put("4 6 7 7 4 3 2 1 5 2", 10);
        tests.put("5 2 5 1 2 3 4 7 7 6 2 7 1 2 3 4 5 5 4", 26);
        Iterator it = tests.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry pairs = (Map.Entry) it.next();
            it.remove();
            String[] strings = ((String) pairs.getKey()).split(" ");
            int[] walls = new int[strings.length];
            for (int i = 0; i < walls.length; i++) {
                walls[i] = Integer.parseInt(strings[i].trim());
            }
            System.out.println(pairs.getKey() + " result=" + accumulatedWater(walls) + " expected= " + pairs.getValue());
        }
    }

    static int accumulatedWater(int[] wall) {
        int MAX = 0;
        int start = 0;
        for (int i = 0; i < wall.length; i++) { // let's go to the first peak
            if (wall[i] >= MAX) {
                MAX = wall[i];
                start = i;
            } else {
                break;
            }
        }
        int[] accumulate_max = new int[MAX + 1];      // sums up to certain height
        int[] accumulate_max_step = new int[MAX + 1]; // steps up to certain height
        Orientation going = Orientation.DOWN;
        int prev = MAX;
        int local_sum = 0;
        int total_sum = 0;
        int PREVPEAK = MAX;
        for (int i = start + 1; i < wall.length; i++) {
            if (i == wall.length - 1 ||
                wall[i] < prev && going == Orientation.UP) {
                going = Orientation.DOWN;
                if (wall[i - 1] >= MAX) {
                    total_sum += accumulate_max_step[MAX - 1] * MAX - accumulate_max[MAX - 1];
                    MAX = wall[i - 1];
                    PREVPEAK = MAX;
                    accumulate_max = new int[MAX + 1];
                    accumulate_max_step = new int[MAX + 1];
                    local_sum = 0;
                } else {
                    int indexNewPeak = (i == wall.length - 1 && wall[i] > wall[i - 1]) ? i : i - 1;
                    int water = accumulate_max_step[wall[indexNewPeak] - 1] * wall[indexNewPeak] - accumulate_max[wall[indexNewPeak] - 1];
                    if (wall[indexNewPeak] > PREVPEAK) {
                        local_sum = water;
                        PREVPEAK = wall[indexNewPeak];
                    } else {
                        local_sum += water;
                    }
                }
            } else if (wall[i] > prev) {
                going = Orientation.UP;
            }
            for (int j = wall[i]; j <= MAX; j++) {
                accumulate_max[j] += wall[i];
                accumulate_max_step[j] += 1;
            }
            prev = wall[i];
        }
        return total_sum + local_sum;
    }
}

Converting words to numbers

I have a problem converting words into numbers, like:
Input:
Five Thousand Six Hundred Thirty two
Output:
5632
How can I do this?
Here, I did it in Python; it may help you or someone else from an algorithmic perspective.
#!/usr/bin/python
__author__ = 'tomcat'

# word -> value for every token we understand
all = {
    "one": 1,
    "two": 2,
    "three": 3,
    "four": 4,
    "five": 5,
    "six": 6,
    "seven": 7,
    "eight": 8,
    "nine": 9,
    "ten": 10,
    "eleven": 11,
    "twelve": 12,
    "thirteen": 13,
    "fourteen": 14,
    "fifteen": 15,
    "sixteen": 16,
    "seventeen": 17,
    "eighteen": 18,
    "nineteen": 19,
    "twenty": 20,
    "thirty": 30,
    "forty": 40,
    "fifty": 50,
    "sixty": 60,
    "seventy": 70,
    "eighty": 80,
    "ninety": 90,
    "hundred": 100,
    "thousand": 1000,
    "million": 1000000,
    "billion": 1000000000,
    "trillion": 1000000000000,
    "quadrillion": 1000000000000000,
    "quintillion": 1000000000000000000,
    "sextillion": 1000000000000000000000,
    "septillion": 1000000000000000000000000,
    "octillion": 1000000000000000000000000000,
    "nonillion": 1000000000000000000000000000000
}

# multipliers that close off a partial group ("five thousand" -> 5 * 1000)
spliter = {
    "thousand": 1000,
    "million": 1000000,
    "billion": 1000000000,
    "trillion": 1000000000000,
    "quadrillion": 1000000000000000,
    "quintillion": 1000000000000000000,
    "sextillion": 1000000000000000000000,
    "septillion": 1000000000000000000000000,
    "octillion": 1000000000000000000000000000,
    "nonillion": 1000000000000000000000000000000
}

inputnumber = raw_input("Please enter string number : ")
tokens = inputnumber.lower().split(" ")  # lower-case so "Five" matches "five"
result = 0
partial_result = 0
for index in range(len(tokens)):
    if tokens[index] in spliter:
        if partial_result == 0:
            partial_result = 1
        partial_result *= all[tokens[index]]
        result += partial_result
        partial_result = 0
    else:
        if tokens[index] == "hundred":
            if partial_result == 0:
                partial_result = 1
            partial_result *= all[tokens[index]]
        else:
            partial_result += all[tokens[index]]
result += partial_result
print result
How would you do this in general, if you didn't have to code it up? In this example your collection of words is:
Five Thousand Six Hundred Thirty two
We could convert each of those to numbers to get the following collection:
5 1000 6 100 30 2
Starting from 5 (hint: 5 < 1000 and it sits to the left of 1000; this suggests...?), what steps would you follow to get to the number 5632?
What if the number was
Six hundred thirty three billion fifty four million two hundred twenty three thousand four?
Can you figure out some sort of rule (or better, an algorithm)?
Once you have broken the big problem down into a collection of little problems, the next battle is finding the correct way of coding something that correctly solves each little problem.
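(If you want to check your rule afterwards: one possible implementation of it, a C++ sketch of my own and limited to the words it declares, is that small words accumulate into a partial sum, "hundred" multiplies that partial sum, and big multipliers like "thousand" or "billion" flush it into the result.)
#include <iostream>
#include <map>
#include <sstream>
#include <string>
using namespace std;

long long wordsToNumber(const string& text) {
    map<string, long long> value = {
        {"one", 1}, {"two", 2}, {"three", 3}, {"four", 4}, {"five", 5},
        {"six", 6}, {"seven", 7}, {"eight", 8}, {"nine", 9}, {"ten", 10},
        {"twenty", 20}, {"thirty", 30}, {"forty", 40}, {"fifty", 50},
        {"sixty", 60}, {"seventy", 70}, {"eighty", 80}, {"ninety", 90},
        {"hundred", 100}, {"thousand", 1000},
        {"million", 1000000}, {"billion", 1000000000LL}};
    long long result = 0, partial = 0;
    istringstream in(text);
    string word;
    while (in >> word) {
        long long v = value[word];
        if (v == 100)
            partial = (partial == 0 ? 1 : partial) * v; // "six hundred" -> 600
        else if (v >= 1000) {                           // flush a completed group
            result += (partial == 0 ? 1 : partial) * v;
            partial = 0;
        } else
            partial += v;                               // "thirty" "two" -> 32
    }
    return result + partial;
}

int main() {
    cout << wordsToNumber("five thousand six hundred thirty two") << '\n'; // 5632
}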
Hope this gives you a start:
#include <iostream>
#include <string>
#include <map>
#include <cstdlib> // for system()
using namespace std;

map<string, int> reference;
string ones[] = {"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten"};

void storeOnes() {
    for (int i = 0; i < 11; i++) {
        reference[ones[i]] = i;
    }
}

int main() {
    // set up
    storeOnes();
    string test = "onetwothreetwofour";
    string buffer;
    for (int i = 0; i < test.length(); i++) {
        buffer.push_back(test.at(i));
        map<string, int>::iterator it = reference.find(buffer);
        if (it != reference.end()) {
            cout << (*it).second;
            buffer = "";
        }
    }
    cout << endl << endl;
    system("pause");
    return 0;
}
Consider using a map. Say the input is five thousand six hundred three ten two.
#include <iostream>
#include <map>
#include <string>
using namespace std;

int main()
{
    map<string, int> digits;
    digits["one"] = 1;
    digits["two"] = 2;
    digits["three"] = 3;
    digits["four"] = 4;
    digits["five"] = 5;
    digits["six"] = 6;
    digits["seven"] = 7;
    digits["eight"] = 8;
    digits["nine"] = 9;
    digits["ten"] = 10;
    digits["hundred"] = 100; // was 10; "six hundred" needs 6 * 100
    digits["thousand"] = 1000;
    const int num_len = 7;
    string num_str[num_len] = {"five", "thousand", "six", "hundred", "three", "ten", "two"};
    int number = digits[num_str[0]] * digits[num_str[1]] +
                 digits[num_str[2]] * digits[num_str[3]] +
                 digits[num_str[4]] * digits[num_str[5]] +
                 digits[num_str[6]];
    cout << number; // 5632
}
Another way to do this is by recursion, as is done here (in Java and C++):
https://github.com/jman27182818/words_to_numbers
Most of that code is about parsing the string to obtain a string vector that can be acted upon recursively. Essentially the algorithm is:
A[] = String array // example {"three","hundred"}

Integer converter(A) {
    if (length(A) <= 4)
        handle_base_case;
        // either A only has small values or is of the form
        // {"three","billion"};
        // if length is greater than 4 the array must have a large value
    index = find_first_value_greater_than_100(A);
    arrayl = A[1:index-1];
    arrayr = A[index+1:A.end()];
    return (convert_hundreds(arrayl) * Value_of(A[index]) + converter(arrayr));
}
where "convert_hundreds" takes an array of strings whose values are no larger than 100 each (or 1,000 for Spanish) and returns the numerical value. This algorithm is more memory-intensive than the previous one, but I like it because it seems to generalize better to other languages.
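For illustration, a minimal C++ sketch of what convert_hundreds might look like (my guess at the helper's shape, not the repository's actual code):
#include <iostream>
#include <map>
#include <string>
#include <vector>
using namespace std;

// Words individually worth at most 100: {"three","hundred","twenty","one"} -> 321.
int convert_hundreds(const vector<string>& words) {
    map<string, int> value = {
        {"one", 1}, {"two", 2}, {"three", 3}, {"four", 4}, {"five", 5},
        {"six", 6}, {"seven", 7}, {"eight", 8}, {"nine", 9}, {"ten", 10},
        {"twenty", 20}, {"thirty", 30}, {"forty", 40}, {"fifty", 50},
        {"sixty", 60}, {"seventy", 70}, {"eighty", 80}, {"ninety", 90}};
    int total = 0;
    for (const string& w : words) {
        if (w == "hundred")
            total = (total == 0 ? 1 : total) * 100; // "three hundred" -> 300
        else
            total += value[w];
    }
    return total;
}

int main() {
    cout << convert_hundreds({"three", "hundred", "twenty", "one"}) << '\n'; // 321
}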
Here's a process in C++.
input : nine thousand nine hundred ninety nine
output: 9999
The vector 'talika' looks like this:
9 1000 9 100 9 10 9 1
#include <iostream>
#include <bits/stdc++.h>
using namespace std;

bool endswith(string a, string b)
{
    // To check if the word ends with "ty", e.g. thirty, forty
    int len = a.length();
    string last_two = a.substr(len - 2); // the final two characters
    if ((last_two.compare(b)) == 0) return true;
    return false;
}

int main() {
    int t, i, j;
    int n, cnt, num;
    cin >> t;
    getchar();
    map<string, int> conv;
    vector<int> talika;
    conv["one"] = 1;
    conv["two"] = 2;
    conv["three"] = 3;
    conv["four"] = 4;
    conv["five"] = 5;
    conv["six"] = 6;
    conv["seven"] = 7;
    conv["eight"] = 8;
    conv["nine"] = 9;
    conv["ten"] = 10;
    conv["eleven"] = 11;
    conv["twelve"] = 12;
    conv["thirteen"] = 13;
    conv["fourteen"] = 14;
    conv["fifteen"] = 15;
    conv["sixteen"] = 16;
    conv["seventeen"] = 17;
    conv["eighteen"] = 18;
    conv["nineteen"] = 19; // was misspelled "ninteen", so "nineteen" never matched
    conv["thousand"] = 1000;
    conv["hundred"] = 100;
    conv["twenty"] = 20;
    conv["thirty"] = 30;
    conv["forty"] = 40;
    conv["fifty"] = 50;
    conv["sixty"] = 60;
    conv["seventy"] = 70;
    conv["eighty"] = 80;
    conv["ninety"] = 90;
    while (t--)
    {
        string num_in_word, ongsho;
        getline(cin, num_in_word); // get the number in words
        stringstream x(num_in_word);
        bool sheshe_ty = false;
        while (getline(x, ongsho, ' '))
        {
            num = conv[ongsho];
            if (endswith(ongsho, "ty"))
            {
                talika.push_back(num / 10);
                talika.push_back(10);
                sheshe_ty = true;
                continue;
            }
            talika.push_back(num);
            sheshe_ty = false;
        }
        if (conv[ongsho] != 1000 && conv[ongsho] != 100 && sheshe_ty == false) {
            talika.push_back(1);
        }
        num = 0;
        for (i = 0; i < talika.size(); i++)
        {
            num += talika[i] * talika[i + 1];
            i++;
        }
        cout << "The Number: " << num << endl;
        talika.clear();
    }
    return 0;
}
This may help:
#include <bits/stdc++.h>
using namespace std;

/*
 * Complete the 'getPhoneNumber' function below.
 *
 * The function is expected to return a STRING.
 * The function accepts STRING s as parameter.
 */

string wordsToNumber(string s)
{
    vector<string> allStringNums{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"};
    vector<string> operations{"double", "triple"};
    string search;
    string ans = "";
    int op = 0;
    for (int i = 0; i < s.length(); i++)
    {
        if (i == s.length() - 1)
        {
            search.push_back(s.at(i));
        }
        if (isspace(s[i]) || i == s.length() - 1)
        {
            // cout << search << endl;
            auto it = find(allStringNums.begin(), allStringNums.end(), search);
            if (it != allStringNums.end())
            {
                if (op > 0)
                {
                    for (int j = 0; j < op; j++)
                    {
                        ans += to_string(it - allStringNums.begin());
                    }
                    op = 0;
                }
                else
                {
                    ans += to_string(it - allStringNums.begin());
                }
            }
            else
            {
                it = find(operations.begin(), operations.end(), search);
                if (it != operations.end())
                {
                    op = (it - operations.begin()) + 2;
                }
                // cout << it - operations.begin() << " " << op << endl;
            }
            search = "";
        }
        else
        {
            search.push_back(s.at(i));
        }
    }
    cout << ans << endl;
    return ans;
}

int main()
{
    ofstream fout(getenv("OUTPUT_PATH"));
    string s;
    getline(cin, s);
    string result = wordsToNumber(s);
    fout << result << "\n";
    fout.close();
    return 0;
}
Input will be like one two three four nine five three one, or
something like one two double three five six triple four six nine.
Their outputs will be 12349531 and 123356444469 respectively.

Calculate IRR (Internal Rate of Return) and NPV programmatically in Objective-C

I am developing a financial app and require an IRR calculation (built-in functionality of Excel), and I found a great tutorial in C here and an answer in C# here.
I implemented the code from the C link above, but it only gives a correct result when the IRR is positive; it does not return a negative value when it should. In Excel, =IRR(values, guessrate) returns a negative IRR as well for some inputs.
I have referred to the code at the C# link too; it seems to follow good procedures, returns errors, and hopefully returns negative IRR too, the same as Excel. But I am not familiar with C#, so I have not been able to port that code to Objective-C or C.
Below is the C code from the link above, which I have implemented, to help you out:
#define LOW_RATE 0.01
#define HIGH_RATE 0.5
#define MAX_ITERATION 1000
#define PRECISION_REQ 0.00000001
double computeIRR(double cf[], int numOfFlows)
{
    int i = 0, j = 0;
    double m = 0.0;
    double old = 0.00;
    double new = 0.00;
    double oldguessRate = LOW_RATE;
    double newguessRate = LOW_RATE;
    double guessRate = LOW_RATE;
    double lowGuessRate = LOW_RATE;
    double highGuessRate = HIGH_RATE;
    double npv = 0.0;
    double denom = 0.0;
    for (i = 0; i < MAX_ITERATION; i++)
    {
        npv = 0.00;
        for (j = 0; j < numOfFlows; j++)
        {
            denom = pow((1 + guessRate), j);
            npv = npv + (cf[j] / denom);
        }
        /* Stop checking once the required precision is achieved */
        if ((npv > 0) && (npv < PRECISION_REQ))
            break;
        if (old == 0)
            old = npv;
        else
            old = new;
        new = npv;
        if (i > 0)
        {
            if (old < new)
            {
                if (old < 0 && new < 0)
                    highGuessRate = newguessRate;
                else
                    lowGuessRate = newguessRate;
            }
            else
            {
                if (old > 0 && new > 0)
                    lowGuessRate = newguessRate;
                else
                    highGuessRate = newguessRate;
            }
        }
        oldguessRate = guessRate;
        guessRate = (lowGuessRate + highGuessRate) / 2;
        newguessRate = guessRate;
    }
    return guessRate;
}
I have attached the results for some values that differ between Excel and the C code above.
Values:
1 = -18.5,
2 = -18.5,
3 = -18.5,
4 = -18.5,
5 = -18.5,
6 = 32.0
Guess rate: 0.1
Output of Excel: -33.5%
Output of C code: 0.010, or say 1.0%
Since LOW_RATE and HIGH_RATE are both positive, you can never get a negative rate. You have to change:
#define LOW_RATE 0.01
to, for example,
#define LOW_RATE -0.5
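With that change, a quick driver over the question's cash flows should land near Excel's answer (a sketch of mine, appended below the function in the same C file; the cash flows and the expected -33.5% come from the comparison above):
#include <stdio.h>

/* Exercises the corrected computeIRR with the question's cash flows. */
int main(void) {
    double cf[] = {-18.5, -18.5, -18.5, -18.5, -18.5, 32.0};
    printf("IRR = %f\n", computeIRR(cf, 6)); /* expected near -0.335 */
    return 0;
}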

Simple number series

This is a simple number-series question. I have numbers in a series like
2, 4, 8, 16, 32, 64, 128, 256; these numbers are the powers of two: 2, 2², 2³ and so on.
Now if I add 2+4+8 = 14, I can get 14 only by adding 2, 4 and 8.
So I have 14 in my hand now; by some logic I need to recover the values that were added to make 14.
Example:
2+4+8 = 14
14 (some logic) = 2, 4, 8.
This is an easy one:
2+4+8=14 ... 14+2=16
2+4+8+16=30 ... 30+2=32
2+4+8+16+32=62 ... 62+2=64
So you just need to add 2 to your sum, then calculate ld (binary logarithm), and then subtract 1. This gives you the number of elements of your sequence you need to add up.
e.g. in PHP:
$target=14;
$count=log($target+2)/log(2)-1;
echo $count;
gives 3, so you have to add the first 3 elements of your sequence to get 14.
Check the following C# code:
var x = 14; // in your case
var indices = new List<int>();
for (var i = 31; i >= 0; i--) // was "i >= i", which never terminates
{
    var pow = (int)Math.Pow(2, i);
    if (x - pow >= 0)
    {
        indices.Add(pow);
        x -= pow;
    }
}
indices.Reverse();
Assuming C:
unsigned int a = 14;
while (a >>= 1)
{
    printf("%d ", a + 1);
}
If you are doing this in a program, something like this would suffice:
int myval = 14;
int maxval = 256;
string elements = "";
for (int i = 1; i <= maxval; i *= 2)
{
    if ((myval & i) != 0)
        elements += "," + i.ToString();
}
Use congruences modulo powers of two: 14 mod 2 = 0, 14 mod 4 = 2, 14 mod 8 = 6, 14 mod 16 = 14, 14 mod 32 = 14, ...
The differences of this sequence are the numbers you are looking for: 2 - 0 = 2, 6 - 2 = 4, 14 - 6 = 8, 14 - 14 = 0, ...
It's called the p-adic representation (here p = 2) and is formally a bit more difficult to explain, but I hope this gives you an idea for an algorithm.
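A minimal sketch of that idea (my C++ translation, not the answerer's code): take n mod 2, n mod 4, n mod 8, ... and print the successive positive differences.
#include <iostream>

int main() {
    int n = 14;
    int prev = 0;
    // n mod 2, n mod 4, n mod 8, ...; each positive difference is one of the
    // powers of two that sum to n.
    for (long long m = 2; prev != n; m *= 2) {
        int r = n % m;
        if (r - prev > 0)
            std::cout << (r - prev) << ' '; // prints: 2 4 8
        prev = r;
    }
    std::cout << '\n';
}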

Small differences in SHA1 hashes

A project I am working on uses Apache Shiro as a security framework. Passwords are SHA1 hashed (no salt, no iterations). Login is SSL secured. However, the remaining part of the application is not SSL secured. In this context (no SSL) there should be a form where a user can change the password.
Since it wouldn't be a good idea to transmit it in plain text, it should be hashed on the client and then transmitted to the server. As the client is GWT (2.3) based, I am trying the library http://code.google.com/p/gwt-crypto, which uses code from Bouncy Castle.
However, in many cases (not all) the hashes generated by the two frameworks differ in 1-4(?) characters.
For instance "happa3" is hashed to
"fe7f3cffd8a5f0512a5f1120f1369f48cd6f47c2"
by both implementations, whereas just "happa" is hashed to
"fb3c3a741b4e07a87d9cb68f3db020d6fbfed00a"
by the Shiro implementation and to
"fb3c3a741b4e07a87d9cb63f3db020d6fbfed00a"
by the gwt-crypto implementation (23rd character differs).
I wonder whether there is a "correct"/standard SHA1 hashing and whether there is a bug in one of the libraries or maybe my usage of them is flawed.
One of my first thoughts was related to different encodings or strange conversions due to different transport mechanisms (RPC vs. Post). To my knowledge though (and what puzzles me most), SHA1 hashes should differ completely with a high probability if there is just a difference of a single bit. So different encodings shouldn't be the issue here.
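(One way to settle which output is right is to hash the same UTF-8 bytes with a third, independent implementation and compare; a minimal sketch of mine using OpenSSL's C API, which is not part of the question's stack:)
#include <cstdio>
#include <cstring>
#include <openssl/sha.h>

int main() {
    const char* s = "happa";
    unsigned char md[SHA_DIGEST_LENGTH];
    // SHA1() hashes the raw bytes; compare the hex against the two values above.
    SHA1(reinterpret_cast<const unsigned char*>(s), std::strlen(s), md);
    for (int i = 0; i < SHA_DIGEST_LENGTH; i++) std::printf("%02x", md[i]);
    std::printf("\n");
    return 0;
}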
I am using this code on the client (GWT) for hashing:
String hashed = toHex(createSHA1Hash("password"));
...
private String createSHA1Hash(String passwordString) {
    SHA1Digest sha1 = new SHA1Digest();
    byte[] bytes;
    byte[] result = new byte[sha1.getDigestSize()];
    try {
        bytes = passwordString.getBytes();
        sha1.update(bytes, 0, bytes.length);
        int val = sha1.doFinal(result, 0);
    } catch (UnsupportedEncodingException e) {}
    // note: round-tripping raw digest bytes through String depends on the
    // platform charset and can corrupt the digest
    return new String(result);
}

public String toHex(String arg) {
    return new BigInteger(1, arg.getBytes()).toString(16);
}
And this on the server (Shiro):
String hashed = new Sha1Hash("password").toHex();
which AFAICS does something very similar behind the scenes (I had a quick look at the source code).
Did I miss something obvious here?
EDIT: It seems the GWT code does not run natively for some reason (i.e. it only runs in development mode) and silently fails (it does compile, though). I have to find out why...
Edit(2): "int val = sha1.doFinal(result, 0);" is the line that makes trouble, i.e. if it is present, the whole code does not run natively (as JS) but only in dev mode (with wrong results).
You could test this version:
public class SHA1 {
public static native String calcSHA1(String s) /*-{
//
// A JavaScript implementation of the Secure Hash Algorithm, SHA-1, as defined
// in FIPS 180-1
// Version 2.2 Copyright Paul Johnston 2000 - 2009.
// Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
// Distributed under the BSD License
// See http://pajhome.org.uk/crypt/md5 for details.
//
//
// Configurable variables. You may need to tweak these to be compatible with
// the server-side, but the defaults work in most cases.
//
var hexcase = 0; // hex output format. 0 - lowercase; 1 - uppercase
var b64pad = ""; // base-64 pad character. "=" for strict RFC compliance
//
// These are the functions you'll usually want to call
// They take string arguments and return either hex or base-64 encoded strings
//
function b64_sha1(s) { return rstr2b64(rstr_sha1(str2rstr_utf8(s))); }
function any_sha1(s, e) { return rstr2any(rstr_sha1(str2rstr_utf8(s)), e); }
function hex_hmac_sha1(k, d)
{ return rstr2hex(rstr_hmac_sha1(str2rstr_utf8(k), str2rstr_utf8(d))); }
function b64_hmac_sha1(k, d)
{ return rstr2b64(rstr_hmac_sha1(str2rstr_utf8(k), str2rstr_utf8(d))); }
function any_hmac_sha1(k, d, e)
{ return rstr2any(rstr_hmac_sha1(str2rstr_utf8(k), str2rstr_utf8(d)), e); }
//
// Perform a simple self-test to see if the VM is working
//
function sha1_vm_test()
{
return hex_sha1("abc").toLowerCase() == "a9993e364706816aba3e25717850c26c9cd0d89d";
}
//
// Calculate the SHA1 of a raw string
//
function rstr_sha1(s)
{
return binb2rstr(binb_sha1(rstr2binb(s), s.length * 8));
}
//
// Calculate the HMAC-SHA1 of a key and some data (raw strings)
//
function rstr_hmac_sha1(key, data)
{
var bkey = rstr2binb(key);
if(bkey.length > 16) bkey = binb_sha1(bkey, key.length * 8);
var ipad = Array(16), opad = Array(16);
for(var i = 0; i < 16; i++)
{
ipad[i] = bkey[i] ^ 0x36363636;
opad[i] = bkey[i] ^ 0x5C5C5C5C;
}
var hash = binb_sha1(ipad.concat(rstr2binb(data)), 512 + data.length * 8);
return binb2rstr(binb_sha1(opad.concat(hash), 512 + 160));
}
//
// Convert a raw string to a hex string
//
function rstr2hex(input)
{
try { hexcase } catch(e) { hexcase=0; }
var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef";
var output = "";
var x;
for(var i = 0; i < input.length; i++)
{
x = input.charCodeAt(i);
output += hex_tab.charAt((x >>> 4) & 0x0F)
+ hex_tab.charAt( x & 0x0F);
}
return output;
}
//
// Convert a raw string to a base-64 string
//
function rstr2b64(input)
{
try { b64pad } catch(e) { b64pad=''; }
var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var output = "";
var len = input.length;
for(var i = 0; i < len; i += 3)
{
var triplet = (input.charCodeAt(i) << 16)
| (i + 1 < len ? input.charCodeAt(i+1) << 8 : 0)
| (i + 2 < len ? input.charCodeAt(i+2) : 0);
for(var j = 0; j < 4; j++)
{
if(i * 8 + j * 6 > input.length * 8) output += b64pad;
else output += tab.charAt((triplet >>> 6*(3-j)) & 0x3F);
}
}
return output;
}
//
// Convert a raw string to an arbitrary string encoding
//
function rstr2any(input, encoding)
{
var divisor = encoding.length;
var remainders = Array();
var i, q, x, quotient;
// Convert to an array of 16-bit big-endian values, forming the dividend
var dividend = Array(Math.ceil(input.length / 2));
for(i = 0; i < dividend.length; i++)
{
dividend[i] = (input.charCodeAt(i * 2) << 8) | input.charCodeAt(i * 2 + 1);
}
//
// Repeatedly perform a long division. The binary array forms the dividend,
// the length of the encoding is the divisor. Once computed, the quotient
// forms the dividend for the next step. We stop when the dividend is zero.
// All remainders are stored for later use.
//
while(dividend.length > 0)
{
quotient = Array();
x = 0;
for(i = 0; i < dividend.length; i++)
{
x = (x << 16) + dividend[i];
q = Math.floor(x / divisor);
x -= q * divisor;
if(quotient.length > 0 || q > 0)
quotient[quotient.length] = q;
}
remainders[remainders.length] = x;
dividend = quotient;
}
// Convert the remainders to the output string
var output = "";
for(i = remainders.length - 1; i >= 0; i--)
output += encoding.charAt(remainders[i]);
// Append leading zero equivalents
var full_length = Math.ceil(input.length * 8 /
(Math.log(encoding.length) / Math.log(2)))
for(i = output.length; i < full_length; i++)
output = encoding[0] + output;
return output;
}
//
// Encode a string as utf-8.
// For efficiency, this assumes the input is valid utf-16.
//
function str2rstr_utf8(input)
{
var output = "";
var i = -1;
var x, y;
while(++i < input.length)
{
// Decode utf-16 surrogate pairs
x = input.charCodeAt(i);
y = i + 1 < input.length ? input.charCodeAt(i + 1) : 0;
if(0xD800 <= x && x <= 0xDBFF && 0xDC00 <= y && y <= 0xDFFF)
{
x = 0x10000 + ((x & 0x03FF) << 10) + (y & 0x03FF);
i++;
}
// Encode output as utf-8
if(x <= 0x7F)
output += String.fromCharCode(x);
else if(x <= 0x7FF)
output += String.fromCharCode(0xC0 | ((x >>> 6 ) & 0x1F),
0x80 | ( x & 0x3F));
else if(x <= 0xFFFF)
output += String.fromCharCode(0xE0 | ((x >>> 12) & 0x0F),
0x80 | ((x >>> 6 ) & 0x3F),
0x80 | ( x & 0x3F));
else if(x <= 0x1FFFFF)
output += String.fromCharCode(0xF0 | ((x >>> 18) & 0x07),
0x80 | ((x >>> 12) & 0x3F),
0x80 | ((x >>> 6 ) & 0x3F),
0x80 | ( x & 0x3F));
}
return output;
}
//
// Encode a string as utf-16
//
function str2rstr_utf16le(input)
{
var output = "";
for(var i = 0; i < input.length; i++)
output += String.fromCharCode( input.charCodeAt(i) & 0xFF,
(input.charCodeAt(i) >>> 8) & 0xFF);
return output;
}
function str2rstr_utf16be(input)
{
var output = "";
for(var i = 0; i < input.length; i++)
output += String.fromCharCode((input.charCodeAt(i) >>> 8) & 0xFF,
input.charCodeAt(i) & 0xFF);
return output;
}
//
// Convert a raw string to an array of big-endian words
// Characters >255 have their high-byte silently ignored.
//
function rstr2binb(input)
{
var output = Array(input.length >> 2);
for(var i = 0; i < output.length; i++)
output[i] = 0;
for(var i = 0; i < input.length * 8; i += 8)
output[i>>5] |= (input.charCodeAt(i / 8) & 0xFF) << (24 - i % 32);
return output;
}
//
// Convert an array of big-endian words to a string
//
function binb2rstr(input)
{
var output = "";
for(var i = 0; i < input.length * 32; i += 8)
output += String.fromCharCode((input[i>>5] >>> (24 - i % 32)) & 0xFF);
return output;
}
//
// Calculate the SHA-1 of an array of big-endian words, and a bit length
//
function binb_sha1(x, len)
{
// append padding
x[len >> 5] |= 0x80 << (24 - len % 32);
x[((len + 64 >> 9) << 4) + 15] = len;
var w = Array(80);
var a = 1732584193;
var b = -271733879;
var c = -1732584194;
var d = 271733878;
var e = -1009589776;
for(var i = 0; i < x.length; i += 16)
{
var olda = a;
var oldb = b;
var oldc = c;
var oldd = d;
var olde = e;
for(var j = 0; j < 80; j++)
{
if(j < 16) w[j] = x[i + j];
else w[j] = bit_rol(w[j-3] ^ w[j-8] ^ w[j-14] ^ w[j-16], 1);
var t = safe_add(safe_add(bit_rol(a, 5), sha1_ft(j, b, c, d)),
safe_add(safe_add(e, w[j]), sha1_kt(j)));
e = d;
d = c;
c = bit_rol(b, 30);
b = a;
a = t;
}
a = safe_add(a, olda);
b = safe_add(b, oldb);
c = safe_add(c, oldc);
d = safe_add(d, oldd);
e = safe_add(e, olde);
}
return Array(a, b, c, d, e);
}
//
// Perform the appropriate triplet combination function for the current
// iteration
//
function sha1_ft(t, b, c, d)
{
if(t < 20) return (b & c) | ((~b) & d);
if(t < 40) return b ^ c ^ d;
if(t < 60) return (b & c) | (b & d) | (c & d);
return b ^ c ^ d;
}
//
// Determine the appropriate additive constant for the current iteration
//
function sha1_kt(t)
{
return (t < 20) ? 1518500249 : (t < 40) ? 1859775393 :
(t < 60) ? -1894007588 : -899497514;
}
//
// Add integers, wrapping at 2^32. This uses 16-bit operations internally
// to work around bugs in some JS interpreters.
//
function safe_add(x, y)
{
var lsw = (x & 0xFFFF) + (y & 0xFFFF);
var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
return (msw << 16) | (lsw & 0xFFFF);
}
//
// Bitwise rotate a 32-bit number to the left.
//
function bit_rol(num, cnt)
{
return (num << cnt) | (num >>> (32 - cnt));
}
return rstr2hex(rstr_sha1(str2rstr_utf8(s)));
}-*/;
}
I'm using it for my client-side SHA generation and it works well.