How to access the results of a MAGMA routine - lapack

I am trying to access the results of the eigenvalue decomposition of a general real matrix, using the magma_sgeev routine. My code is as follows:
#include <cstdlib>
#include <iostream>
#include <map>
#include <typeindex>
#include <typeinfo>
#include <magma_v2.h>
#include <random>
#define N 10
#define LDA N
#define LDVL N
#define LDVR N
/* Main program */
int main()
{
    magma_init();

    /* Locals */
    int n = N, lda = LDA, ldvl = LDVL, ldvr = LDVR, info, lwork;
    float wkopt;
    float *work;

    /* Local arrays */
    float wr[N], wi[N], vl[LDVL * N], vr[LDVR * N];
    float *a = (float *)malloc(LDA * N * sizeof(float));
    for (int i = 0; i < LDA * N; i++)
        a[i] = rand();
    printf("Done populating matrix\n");

    lwork = -1;
    magma_sgeev_m(MagmaNoVec, MagmaNoVec, n, a, lda, wr, wi, vl, ldvl, vr, ldvr,
                  work, lwork, &info);
    lwork = (int)work[0];
    work = (float *)malloc(lwork * sizeof(float));
    printf("%d\n", lwork);
    magma_sgeev_m(MagmaNoVec, MagmaNoVec, n, a, lda, wr, wi, vl, ldvl, vr, ldvr,
                  work, lwork, &info);

    for (int i = 0; i < N; i++)
        printf("%f ", wr[i]);
    std::cout << std::endl;
    for (int i = 0; i < N; i++)
        printf("%f ", wi[i]);
    printf("\n");

    if (info > 0)
    {
        printf("The algorithm failed to compute eigenvalues.\n");
        exit(1);
    }
    magma_finalize();
    return 0;
}
As the documentation says, the arrays wr and wi hold the results of the computation. However, there are two issues with this code.
First, when I try to access wr and wi, I get a segmentation fault. I am not familiar with the inner workings of the MAGMA library; what am I missing, and how do I fix it?
Second, when I increase the size of the matrix, for example from 5x5 to 10x10, I get a segmentation fault in the first call of magma_sgeev itself. What am I missing?
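For reference, this is the two-step workspace-query pattern I believe I am supposed to be following (a sketch based on the netlib sgeev examples, assuming magma_sgeev_m uses the same convention; note the query call receives the address of the one-element buffer wkopt rather than an uninitialized pointer):
lwork = -1;
/* workspace query: writes the optimal lwork into wkopt */
magma_sgeev_m(MagmaNoVec, MagmaNoVec, n, a, lda, wr, wi, vl, ldvl, vr, ldvr,
              &wkopt, lwork, &info);
lwork = (int)wkopt;
work = (float *)malloc(lwork * sizeof(float));
/* actual computation, now with a valid workspace */
magma_sgeev_m(MagmaNoVec, MagmaNoVec, n, a, lda, wr, wi, vl, ldvl, vr, ldvr,
              work, lwork, &info);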
TIA

Related

Converting an arma::mat adjacency matrix into an igraph graph in C (Rcpp)

I use Armadillo objects in some (Rcpp) code where I work with matrices.
The matrices are adjacency matrices, and I need to quickly compute the components of the underlying network; I thought I could do this via igraph.
But I fail already at converting the adjacency matrix into something that can be used with igraph.
#include <RcppArmadillo.h>
#include <iostream>
#include <igraph-0.7.1\include\igraph.h>
using namespace arma;
// [[Rcpp::depends(RcppArmadillo)]]
// [[Rcpp::export]]
vec component_membership(const mat& adjacencymatrix) {
    igraph_t g;
    igraph_adjacency(&g, &adjacencymatrix, IGRAPH_ADJ_DIRECTED);
    // here is more code that is immaterial to my problem
}
On compilation it complains
cannot convert 'const mat* {aka const arma::Mat<double>*}' to
'igraph_matrix_t*' for argument '2' to
'int igraph_adjacency(igraph_t*, igraph_matrix_t*, igraph_adjacency_t)'
I understand why that is the case: I believe igraph_matrix_t and arma::mat must be fundamentally different data types. So how can I convert between them, i.e., how do I fix this easily?
As you suspected, igraph_matrix_t and arma::mat are completely different types. The igraph documentation lists no methods that would construct an igraph_matrix_t from a plain C array, so I think one has to do it by hand. Something like this might work (totally untested!):
igraph_matrix_t m;
int rc = igraph_matrix_init(&m, mat.n_rows, mat.n_cols);
for (unsigned long j = 0; j < mat.n_cols; ++j)
    for (unsigned long i = 0; i < mat.n_rows; ++i)
        igraph_matrix_set(&m, i, j, mat(i, j));
Following @Ralf_Stubner's suggestion, I ended up using the following code. I am not sure it is smart, but I thought I'd share it anyway.
void armamat_to_igraph_matrix(const mat &x_in, igraph_matrix_t *x_out) {
    igraph_matrix_init(x_out, x_in.n_rows, x_in.n_cols);
    for (unsigned long j = 0; j < x_in.n_cols; ++j)
        for (unsigned long i = 0; i < x_in.n_rows; ++i)
            igraph_matrix_set(x_out, i, j, x_in(i, j));
}

void igraph_vector_to_armauvec(const igraph_vector_t *x_in, uvec &x_out) {
    x_out = uvec(igraph_vector_size(x_in));
    for (unsigned long j = 0; j < igraph_vector_size(x_in); ++j)
        x_out(j) = igraph_vector_e(x_in, j);
}

void igraph_vector_to_armavec(const igraph_vector_t *x_in, vec &x_out) {
    x_out = vec(igraph_vector_size(x_in));
    for (unsigned long j = 0; j < igraph_vector_size(x_in); ++j)
        x_out(j) = igraph_vector_e(x_in, j);
}
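For completeness, here is a minimal sketch of how these helpers can be chained to get the component membership I was after (untested here; igraph_clusters and IGRAPH_WEAK are the igraph 0.7 names for weakly connected components):
// [[Rcpp::export]]
vec component_membership(const mat &adjacencymatrix) {
    igraph_matrix_t m;
    igraph_t g;
    igraph_vector_t membership, csize;
    igraph_integer_t no;
    armamat_to_igraph_matrix(adjacencymatrix, &m);      // arma -> igraph matrix
    igraph_adjacency(&g, &m, IGRAPH_ADJ_DIRECTED);      // build the graph
    igraph_vector_init(&membership, 0);
    igraph_vector_init(&csize, 0);
    igraph_clusters(&g, &membership, &csize, &no, IGRAPH_WEAK); // component labels
    vec result;
    igraph_vector_to_armavec(&membership, result);      // igraph -> arma
    igraph_vector_destroy(&csize);
    igraph_vector_destroy(&membership);
    igraph_destroy(&g);
    igraph_matrix_destroy(&m);
    return result;
}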

Recursive use of self-implemented cuIDCT.cu leads to changing output every time the code is re-run

I have implemented a CUDA version of the inverse discrete cosine transform (IDCT) by "translating" the MATLAB built-in function idct.m into CUDA:
My implementation, cuIDCT.cu, works when m = n and both m and n are even numbers.
cuIDCT.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cufft.h>
#include <cuComplex.h>
// round up n/m
inline int iDivUp(int n, int m)
{
    return (n + m - 1) / m;
}
typedef cufftComplex complex;
#define PI 3.1415926535897932384626433832795028841971693993751
__global__
void idct_ComputeWeightsKernel(const int n, complex *ww)
{
    const int pos = threadIdx.x + blockIdx.x * blockDim.x;
    if (pos >= n) return;
    ww[pos].x = sqrtf(2*n) * cosf(pos*PI/(2*n));
    ww[pos].y = sqrtf(2*n) * sinf(pos*PI/(2*n));
}

__global__
void idct_ComputeEvenKernel(const float *b, const int n, const int m, complex *ww, complex *y)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= n || iy >= m) return;
    const int pos = ix + iy*n;

    // Compute precorrection factor
    ww[0].x = ww[0].x / sqrtf(2);
    ww[0].y = ww[0].y / sqrtf(2);

    y[iy + ix*m].x = ww[iy].x * b[pos];
    y[iy + ix*m].y = ww[iy].y * b[pos];
}

__global__
void Reordering_a0_Kernel(complex *y, const int n, const int m, complex *yy)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= n || iy >= m) return;
    const int pos = ix + iy*n;
    yy[iy + ix*n].x = y[pos].x / (float) n;
    yy[iy + ix*n].y = y[pos].y / (float) n;
}

__global__
void Reordering_a_Kernel(complex *yy, const int n, const int m, float *a)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= n || iy >= m) return;
    const int pos = ix + iy*n;
    // Re-order elements of each column according to equations (5.93) and (5.94) in Jain
    if (iy < n/2)
    {
        a[ix + 2*iy*n] = yy[pos].x;
        a[ix + (2*iy+1)*n] = yy[ix + (m-iy-1)*n].x;
    }
}
/**
 * a = idct(b), where a is of size [n m].
 * @param b input array
 * @param n first dimension of a
 * @param m second dimension of a
 * @param a output array
 */
void cuIDCT(float *h_in, int n, int m, float *h_out) // a is of size [n m]
{
    const int data_size = n * m * sizeof(float);

    // device memory allocation
    float *d_in, *d_out;
    cudaMalloc(&d_in, data_size);
    cudaMalloc(&d_out, data_size);

    // transfer data from host to device
    cudaMemcpy(d_in, h_in, data_size, cudaMemcpyHostToDevice);

    // compute IDCT using CUDA
    // begin============================================
    // Compute weights
    complex *ww;
    cudaMalloc(&ww, n*sizeof(complex));

    dim3 threads(256);
    dim3 blocks(iDivUp(n, threads.x));
    idct_ComputeWeightsKernel<<<blocks, threads>>>(n, ww);

    complex *y;
    complex *yy;
    cufftHandle plan;

    dim3 threads1(32, 6);
    dim3 blocks2(iDivUp(n, threads1.x), iDivUp(m, threads1.y)); // for even case

    int Length[1] = {m}; // for each IFFT, the length is m

    cudaMalloc(&y, n*m*sizeof(complex));
    idct_ComputeEvenKernel<<<blocks2, threads1>>>(d_in, n, m, ww, y);

    cufftPlanMany(&plan, 1, Length,
                  Length, 1, m,
                  Length, 1, m, CUFFT_C2C, n);
    cufftExecC2C(plan, y, y, CUFFT_INVERSE); // y is of size [n m]

    cudaMalloc(&yy, n*m*sizeof(complex));
    Reordering_a0_Kernel<<<blocks2, threads1>>>(y, n, m, yy);
    Reordering_a_Kernel<<<blocks2, threads1>>>(yy, n, m, d_out);
    // end============================================

    // transfer result from device to host
    cudaMemcpy(h_out, d_out, data_size, cudaMemcpyDeviceToHost);

    // cleanup
    cufftDestroy(plan);
    cudaFree(ww);
    cudaFree(y);
    cudaFree(yy);
    cudaFree(d_in);
    cudaFree(d_out);
}
Then I compared the result of my CUDA IDCT (i.e. cuIDCT.cu) against MATLAB's idct.m, using the following code:
a test main.cpp function, and
a MATLAB main function main.m to read the result from CUDA and compare it against MATLAB.
main.cpp
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <helper_functions.h>
#include <stdlib.h>
#include <stdio.h>
// N must equal to M, and both must be even numbers
#define N 256
#define M 256
void WriteDataFile(const char *name, int w, int h, const float *in, const float *out)
{
    FILE *stream;
    stream = fopen(name, "wb");

    float data = 202021.25f;
    fwrite(&data, sizeof(float), 1, stream);
    fwrite(&w, sizeof(w), 1, stream);
    fwrite(&h, sizeof(h), 1, stream);

    for (int i = 0; i < h; i++)
        for (int j = 0; j < w; j++)
        {
            const int pos = j + i * h;
            fwrite(in + pos, sizeof(float), 1, stream);
            fwrite(out + pos, sizeof(float), 1, stream);
        }
    fclose(stream);
}

void cuIDCT(float *b, int n, int m, float *a);

int main()
{
    // host memory allocation
    float *h_in = new float [N * M];
    float *h_out = new float [N * M];
    float *h_temp = new float [N * M];

    // input data initialization
    for (int i = 0; i < N * M; i++)
    {
        h_in[i] = (float)rand()/(float)RAND_MAX;
        h_out[i] = h_in[i];
        h_temp[i] = h_in[i];
    }

    // please comment either one case for testing
    // test case 1: use cuIDCT.cu once
    // cuIDCT(h_in, N, M, h_out);

    // test case 2: iteratively use cuIDCT.cu
    for (int i = 0; i < 4; i++)
    {
        if (i % 2 == 0)
            cuIDCT(h_out, N, M, h_temp);
        else
            cuIDCT(h_temp, N, M, h_out);
    }

    // write data, for further visualization using MATLAB
    WriteDataFile("test.flo", N, M, h_in, h_out);

    // cleanup
    delete [] h_in;
    delete [] h_out;
    delete [] h_temp;

    cudaDeviceReset();
}
main.m
clc;clear;
% read
[h_in, h_out] = read_data('test.flo');
% MATLAB result, for test case 1, comment the for-loop
matlab_out = h_in;
for i = 1:4
    matlab_out = idct(matlab_out);
end
% compare
err = matlab_out - h_out;
% show
figure(1);
subplot(221); imshow(h_in, []); title('h\_in'); colorbar
subplot(222); imshow(h_out, []); title('h\_out'); colorbar
subplot(223); imshow(matlab_out, []); title('matlab\_out'); colorbar
subplot(224); imshow(err, []); title('error map'); colorbar
disp(['maximum error between CUDA and MATLAB is ' ...
num2str(max(max(abs(err))))])
I ran the code in Visual Studio 11 (i.e. VS2012) on Windows 7, with an NVIDIA Tesla K20c GPU, CUDA Toolkit 7.5, and MATLAB R2015b.
My test steps:
For test case 1: un-comment test case 1 and comment out test case 2.
Run main.cpp.
Run main.m in MATLAB.
Repeat the two steps above (without any change; just re-run the code).
I repeated the last step 20 times. The output result is unchanged, and the results in main.m are:
results of test case 1
The maximum error is 7.7152e-07.
For test case 2: un-comment test case 2 and comment out test case 1.
Run main.cpp.
Run main.m in MATLAB.
Repeat the two steps above (without any change; just re-run the code).
I repeated the last step 20 times. The output result changes, and the results in main.m are (not enough reputation to put all images; only the wrong case is shown below):
one situation (the wrong one) of test case 2
The maximum error is 0.45341 (2 times), 0.44898 (1 time), 0.26186 (1 time), 0.26301 (1 time), and 9.5716e-07 (15 times).
From the test results, my conclusions are:
From test case 1: cuIDCT.cu is numerically correct (error ~1e-7) compared to idct.m.
From test case 2: repeated use of cuIDCT.cu leads to unstable results, i.e. the output changes every time the code is re-run and is sometimes numerically wrong (error ~0.1).
My question:
From test case 1 we know that cuIDCT.cu is numerically correct with respect to idct.m. But why does repeated use of cuIDCT.cu give a different output each time the code is re-run?
Any help or suggestions are highly appreciated.
I believe the variability in your results is coming about due to this code in your idct_ComputeEvenKernel:
// Compute precorrection factor
ww[0].x = ww[0].x / sqrtf(2);
ww[0].y = ww[0].y / sqrtf(2);
It's not entirely clear what your intent is here, but it's doubtful that this code could be doing what you want. You may be confused about the CUDA execution model.
The above code will be executed by every CUDA thread that you launch for that kernel that passes the thread check:
if (ix >= n || iy >= m) return;
I believe this means 65536 threads will all execute this code in that kernel. Furthermore, the threads will execute that code in more-or-less any order (not all CUDA threads execute in lock-step). They may even step on each other as they are trying to write out their values to the location ww[0]. So the final result in ww[0] will be quite unpredictable.
When I comment out those lines of code, the results become stable for me (albeit different from what they were with those lines in place), unchanging from run to run.
I'd like to point something else out. Wherever you are calculating the .x and .y values of a complex quantity, my suggestion would be to rework the code from this (for example):
y[iy + ix*m].x = ww[iy].x * b[pos];
y[iy + ix*m].y = ww[iy].y * b[pos];
to this:
complex temp1, temp2;
temp1 = ww[iy];
temp2.x = temp1.x * b[pos];
temp2.y = temp1.y * b[pos];
y[iy + ix*m] = temp2;
At least according to my testing, the compiler doesn't seem to be making this optimization for you, and one side-effect benefit is that it's much easier to test your code with cuda-memcheck --tool initcheck .... In the first realization, the compiler will load y[iy + ix*m] as an 8 byte quantity, modify either 4 or 8 bytes of it, then store y[iy + ix*m] as an 8 byte quantity. The second realization should be more efficient (it eliminates the load of y[]), and eliminates the load of an uninitialized quantity (y[]), which the cuda-memcheck tool will report as a hazard.
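(For reference, the initialization check I mean is run as, e.g., cuda-memcheck --tool initcheck ./your_app, where ./your_app stands for whatever your compiled binary is called.)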
This variability I'm describing should be possible whether you run the 1-pass version of your code or the 4-pass version. Therefore I think your statements about the 1-pass version being correct are suspect. I think if you run the 1-pass version enough, you will eventually see variability (although it may require varying initial memory conditions, or running on different GPU types). Even in your own results, we see that 15 out of 20 runs of the 4-pass code produce "correct" results, i.e. the residual error is ~1e-7.
Here's my modified cuIDCT.cu file, modified from the version you posted here. The assumption I'm making below is that you wanted to compute the scaling on ww[0] only once, in which case we can easily handle that arithmetic as an addendum to the previous idct_ComputeWeightsKernel:
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cufft.h>
#include <cuComplex.h>
#include <helper_cuda.h>
#include "assert.h"
// round up n/m
inline int iDivUp(int n, int m)
{
    return (n + m - 1) / m;
}
typedef cufftComplex complex;
#define PI 3.1415926535897932384626433832795028841971693993751
#define cufftSafeCall(err) __cufftSafeCall(err, __FILE__, __LINE__)
inline void __cufftSafeCall(cufftResult err, const char *file, const int line)
{
    if (CUFFT_SUCCESS != err) {
        fprintf(stderr, "CUFFT error in file '%s', line %d\nerror %d: %s\nterminating!\n",
                file, line, err, _cudaGetErrorEnum(err));
        cudaDeviceReset();
        assert(0);
    }
}
__global__
void idct_ComputeWeightsKernel(const int n, complex *ww)
{
    const int pos = threadIdx.x + blockIdx.x * blockDim.x;
    if (pos >= n) return;
    complex temp;
    temp.x = sqrtf(2*n) * cosf(pos*PI/(2*n));
    temp.y = sqrtf(2*n) * sinf(pos*PI/(2*n));
    if (pos == 0) {
        temp.x /= sqrtf(2);
        temp.y /= sqrtf(2);
    }
    ww[pos] = temp;
}

__global__
void idct_ComputeEvenKernel(const float *b, const int n, const int m, complex *ww, complex *y)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= n || iy >= m) return;
    const int pos = ix + iy*n;

    /* handled in idct_ComputeWeightsKernel
    // Compute precorrection factor
    ww[0].x = ww[0].x / sqrtf(2);
    ww[0].y = ww[0].y / sqrtf(2);
    */

    complex temp1, temp2;
    temp1 = ww[iy];
    temp2.x = temp1.x * b[pos];
    temp2.y = temp1.y * b[pos];
    y[iy + ix*m] = temp2;
}

__global__
void Reordering_a0_Kernel(complex *y, const int n, const int m, complex *yy)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= n || iy >= m) return;
    const int pos = ix + iy*n;
    complex temp1, temp2;
    temp1 = y[pos];
    temp2.x = temp1.x / (float) n;
    temp2.y = temp1.y / (float) n;
    yy[iy + ix*n] = temp2;
}

__global__
void Reordering_a_Kernel(complex *yy, const int n, const int m, float *a)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= n || iy >= m) return;
    const int pos = ix + iy*n;
    // Re-order elements of each column according to equations (5.93) and (5.94) in Jain
    if (iy < n/2)
    {
        a[ix + 2*iy*n] = yy[pos].x;
        a[ix + (2*iy+1)*n] = yy[ix + (m-iy-1)*n].x;
    }
}
/**
 * a = idct(b), where a is of size [n m].
 * @param b input array
 * @param n first dimension of a
 * @param m second dimension of a
 * @param a output array
 */
void cuIDCT(float *h_in, int n, int m, float *h_out) // a is of size [n m]
{
    const int data_size = n * m * sizeof(float);

    // device memory allocation
    float *d_in, *d_out;
    checkCudaErrors(cudaMalloc(&d_in, data_size));
    checkCudaErrors(cudaMalloc(&d_out, data_size));

    // transfer data from host to device
    checkCudaErrors(cudaMemcpy(d_in, h_in, data_size, cudaMemcpyHostToDevice));

    // compute IDCT using CUDA
    // begin============================================
    // Compute weights
    complex *ww;
    checkCudaErrors(cudaMalloc(&ww, n*sizeof(complex)));

    dim3 threads(256);
    dim3 blocks(iDivUp(n, threads.x));
    idct_ComputeWeightsKernel<<<blocks, threads>>>(n, ww);

    complex *y;
    complex *yy;
    cufftHandle plan;

    dim3 threads1(32, 6);
    dim3 blocks2(iDivUp(n, threads1.x), iDivUp(m, threads1.y)); // for even case

    int Length[1] = {m}; // for each IFFT, the length is m

    checkCudaErrors(cudaMalloc(&y, n*m*sizeof(complex)));
    idct_ComputeEvenKernel<<<blocks2, threads1>>>(d_in, n, m, ww, y);

    cufftSafeCall(cufftPlanMany(&plan, 1, Length,
                                Length, 1, m,
                                Length, 1, m, CUFFT_C2C, n));
    cufftSafeCall(cufftExecC2C(plan, y, y, CUFFT_INVERSE)); // y is of size [n m]

    checkCudaErrors(cudaMalloc(&yy, n*m*sizeof(complex)));
    Reordering_a0_Kernel<<<blocks2, threads1>>>(y, n, m, yy);
    cudaMemset(d_out, 0, data_size);
    Reordering_a_Kernel<<<blocks2, threads1>>>(yy, n, m, d_out);
    // end============================================

    // transfer result from device to host
    checkCudaErrors(cudaMemcpy(h_out, d_out, data_size, cudaMemcpyDeviceToHost));

    // cleanup
    cufftDestroy(plan);
    checkCudaErrors(cudaFree(ww));
    checkCudaErrors(cudaFree(y));
    checkCudaErrors(cudaFree(yy));
    checkCudaErrors(cudaFree(d_in));
    checkCudaErrors(cudaFree(d_out));
}
You'll note I threw an extra cudaMemset on d_out in there, because it helped me clean up an issue with cuda-memcheck --tool initcheck .... It shouldn't be necessary; you can delete it if you want.

LAPACK tridiag - eigenvectors wrong sign

I'm using the LAPACK routine dsteqr (through the tridiag wrapper below) to find the eigenvectors of the Hamiltonian in a simulation of the infinite square well, and I seem to randomly get a wrong sign (for p=4 and 6 in this case):
red is the theory and green the simulation
(they're also not normalized, but I think that's expected?)
Here is the code :
#include <iostream>
#include <fstream>
#include <cmath>
using namespace std;
extern "C"
void dsteqr_( char * , int *, double [], double [], double [], int *, double [],
int * ) ;
void tridiag( int n, double d[], double e[], double z[] )
/* Call the LAPACK routine dsteqr: compute the eigenvalues and
   eigenvectors of a symmetric tridiagonal matrix */
{
    int info;              // status flag
    double work[2*n-2];    // workspace
    char compz = 'v';      // use 'n' to compute only the eigenvalues
    dsteqr_( &compz, &n, d, e, z, &n, work, &info ); // scalars are passed as pointers
    if ( info == 0 ) cout << "Diagonalization went fine" << endl;
    else cout << "Warning! DSTEQR: info = " << info << endl;
}
double V(double x){
    return 0;
}
int main() {
    int n = 100;
    double d[n], e[n-1], z[n*n], x[n], v[n], L = 5, delta_x = L/n;
    ofstream of1("eigenvalues.dat"), of2("eigenvectors.dat");

    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            if (i == j) z[i+n*j] = 1;
            else z[i+n*j] = 0;
        }
        x[i] = (i+0.5)*delta_x - 0.5*L;
        v[i] = V(x[i]);
        d[i] = 2/pow(delta_x, 2) + v[i];
        if (i < n-2) {
            e[i] = -1/pow(delta_x, 2);
        }
    }

    tridiag(n, d, e, z);

    for (int i = 0; i < n; i++) {
        of1 << d[i] << endl;
        for (int j = 0; j < n; j++) {
            of2 << z[i+n*j] << " ";
        }
        of2 << x[i] << endl;
    }
    of1.close();
    of2.close();
    return 0;
}
So, does anyone know what is happening?
The magnitude of your numerical result being larger than the magnitude of the reference result is due to the fact that L=5; the normalization of LAPACK would correspond to L=1. To get the correct vectors, divide all values by L!
Regarding the sign: if you change the signs of the vectors of an orthonormal basis, the result is still an orthonormal basis. Moreover, if A is a Hermitian matrix and x is a vector such that A.x = l.x, where l is a scalar, then A.(-x) = l.(-x). So the sign of the vector you get is not very significant. Furthermore, if the matrix has a repeated eigenvalue, the result can contain any orthonormal basis of the subspace associated with that eigenvalue. For instance, if the matrix A is the identity, any orthonormal basis is an acceptable result for dsteqr().
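If you want reproducible signs for plotting, one option (my own convention, nothing dsteqr() guarantees) is to impose it yourself after the call, e.g. flip each eigenvector (column k of z) so that its largest-magnitude component is positive:
// after tridiag(n, d, e, z): fix the sign of each eigenvector
for (int k = 0; k < n; k++) {
    int imax = 0;
    for (int i = 1; i < n; i++)
        if (fabs(z[i+n*k]) > fabs(z[imax+n*k])) imax = i;
    if (z[imax+n*k] < 0)
        for (int i = 0; i < n; i++) z[i+n*k] = -z[i+n*k];
}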

Scaling in inverse FFT by cuFFT

Whenever I plot the values obtained by a programme using cuFFT and compare the results with those of Matlab, I get the same shape of graphs, and the maxima and minima occur at the same points. However, the values resulting from cuFFT are much greater than those resulting from Matlab. The Matlab code is
fs = 1000; % sample freq
D = [0:1:4]'; % pulse delay times
t = 0 : 1/fs : 4000/fs; % signal evaluation time
w = 0.5; % width of each pulse
yp = pulstran(t,D,'rectpuls',w);
filt = conj(fliplr(yp));
xx = fft(yp,1024).*fft(filt,1024);
xx = (abs(ifft(xx)));
and the CUDA code, with the same input, is:
cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_FORWARD);
cufftExecC2C(plan, (cufftComplex *)d_filter_signal, (cufftComplex *)d_filter_signal, CUFFT_FORWARD);
ComplexPointwiseMul<<<blocksPerGrid, threadsPerBlock>>>(d_signal, d_filter_signal, NX);
cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_INVERSE);
The cuFFT call also performs a 1024-point FFT with a batch size of 2.
With the scaling factor of NX=1024, the values still do not come out correctly. Please tell me what to do.
This is a late answer to remove this question from the unanswered list.
You are not giving enough information to diagnose your problem, since you do not specify how you set up the cuFFT plan. You do not even specify whether the Matlab and cuFFT signals have exactly the same shape (so that you have just a scaling) or only approximately the same shape. However, let me make the following two observations:
The yp vector has 4001 elements; in contrast, with fft(yp,1024) you perform an FFT that truncates the signal to 1024 elements;
The inverse cuFFT does not perform the scaling by the number of vector elements.
For the sake of convenience (it could be useful to other users), I'm reporting below a simple FFT-IFFT scheme that also includes the scaling, performed using the CUDA Thrust library.
#include <cufft.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
/*********************/
/* SCALE BY CONSTANT */
/*********************/
class Scale_by_constant
{
private:
    float c_;
public:
    Scale_by_constant(float c) { c_ = c; }
    __host__ __device__ float2 operator()(float2 &a) const
    {
        float2 output;
        output.x = a.x / c_;
        output.y = a.y / c_;
        return output;
    }
};

int main(void){
    const int N = 4;

    // --- Setting up input device vector
    thrust::device_vector<float2> d_vec(N, make_cuComplex(1.f, 2.f));

    cufftHandle plan;
    cufftPlan1d(&plan, N, CUFFT_C2C, 1);

    // --- Perform in-place direct Fourier transform
    cufftExecC2C(plan, thrust::raw_pointer_cast(d_vec.data()), thrust::raw_pointer_cast(d_vec.data()), CUFFT_FORWARD);

    // --- Perform in-place inverse Fourier transform
    cufftExecC2C(plan, thrust::raw_pointer_cast(d_vec.data()), thrust::raw_pointer_cast(d_vec.data()), CUFFT_INVERSE);

    thrust::transform(d_vec.begin(), d_vec.end(), d_vec.begin(), Scale_by_constant((float)(N)));

    // --- Setting up output host vector
    thrust::host_vector<float2> h_vec(d_vec);

    for (int i = 0; i < N; i++) printf("Element #%i; Real part = %f; Imaginary part: %f\n", i, h_vec[i].x, h_vec[i].y);

    getchar();
}
With the introduction of the cuFFT callback feature, the normalization required by the inverse FFT performed by the cuFFT can be embedded directly within the cufftExecC2C call by defining the normalization operation as a __device__ function.
Besides the cuFFT User Guide, for the cuFFT callback features, see
CUDA Pro Tip: Use cuFFT Callbacks for Custom Data Processing
Below is an example of implementing the IFFT normalization by cuFFT callback.
#include <stdio.h>
#include <assert.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftXt.h>
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
/*********************/
/* CUFFT ERROR CHECK */
/*********************/
// See http://stackoverflow.com/questions/16267149/cufft-error-handling
#ifdef _CUFFT_H_
static const char *_cudaGetErrorEnum(cufftResult error)
{
    switch (error)
    {
        case CUFFT_SUCCESS:
            return "CUFFT_SUCCESS";
        case CUFFT_INVALID_PLAN:
            return "CUFFT_INVALID_PLAN";
        case CUFFT_ALLOC_FAILED:
            return "CUFFT_ALLOC_FAILED";
        case CUFFT_INVALID_TYPE:
            return "CUFFT_INVALID_TYPE";
        case CUFFT_INVALID_VALUE:
            return "CUFFT_INVALID_VALUE";
        case CUFFT_INTERNAL_ERROR:
            return "CUFFT_INTERNAL_ERROR";
        case CUFFT_EXEC_FAILED:
            return "CUFFT_EXEC_FAILED";
        case CUFFT_SETUP_FAILED:
            return "CUFFT_SETUP_FAILED";
        case CUFFT_INVALID_SIZE:
            return "CUFFT_INVALID_SIZE";
        case CUFFT_UNALIGNED_DATA:
            return "CUFFT_UNALIGNED_DATA";
    }
    return "<unknown>";
}
#endif
#define cufftSafeCall(err) __cufftSafeCall(err, __FILE__, __LINE__)
inline void __cufftSafeCall(cufftResult err, const char *file, const int line)
{
    if (CUFFT_SUCCESS != err) {
        fprintf(stderr, "CUFFT error in file '%s', line %d\nerror %d: %s\nterminating!\n",
                file, line, err, _cudaGetErrorEnum(err));
        cudaDeviceReset();
        assert(0);
    }
}
__device__ void IFFT_Scaling(void *dataOut, size_t offset, cufftComplex element, void *callerInfo, void *sharedPtr) {
    float *scaling_factor = (float*)callerInfo;
    float2 output;
    output.x = cuCrealf(element);
    output.y = cuCimagf(element);
    output.x = output.x / scaling_factor[0];
    output.y = output.y / scaling_factor[0];
    ((float2*)dataOut)[offset] = output;
}
__device__ cufftCallbackStoreC d_storeCallbackPtr = IFFT_Scaling;
/********/
/* MAIN */
/********/
int main() {
    const int N = 16;
    cufftHandle plan;

    float2 *h_input = (float2*)malloc(N*sizeof(float2));
    float2 *h_output1 = (float2*)malloc(N*sizeof(float2));
    float2 *h_output2 = (float2*)malloc(N*sizeof(float2));

    float2 *d_input;   gpuErrchk(cudaMalloc((void**)&d_input, N*sizeof(float2)));
    float2 *d_output1; gpuErrchk(cudaMalloc((void**)&d_output1, N*sizeof(float2)));
    float2 *d_output2; gpuErrchk(cudaMalloc((void**)&d_output2, N*sizeof(float2)));

    float *h_scaling_factor = (float*)malloc(sizeof(float));
    h_scaling_factor[0] = 16.0f;
    float *d_scaling_factor; gpuErrchk(cudaMalloc((void**)&d_scaling_factor, sizeof(float)));
    gpuErrchk(cudaMemcpy(d_scaling_factor, h_scaling_factor, sizeof(float), cudaMemcpyHostToDevice));

    for (int i = 0; i < N; i++) {
        h_input[i].x = 1.0f;
        h_input[i].y = 0.f;
    }
    gpuErrchk(cudaMemcpy(d_input, h_input, N*sizeof(float2), cudaMemcpyHostToDevice));

    cufftSafeCall(cufftPlan1d(&plan, N, CUFFT_C2C, 1));

    cufftSafeCall(cufftExecC2C(plan, d_input, d_output1, CUFFT_FORWARD));
    gpuErrchk(cudaMemcpy(h_output1, d_output1, N*sizeof(float2), cudaMemcpyDeviceToHost));
    for (int i = 0; i < N; i++) printf("Direct transform - %d - (%f, %f)\n", i, h_output1[i].x, h_output1[i].y);

    cufftCallbackStoreC h_storeCallbackPtr;
    gpuErrchk(cudaMemcpyFromSymbol(&h_storeCallbackPtr, d_storeCallbackPtr, sizeof(h_storeCallbackPtr)));
    cufftSafeCall(cufftXtSetCallback(plan, (void **)&h_storeCallbackPtr, CUFFT_CB_ST_COMPLEX, (void **)&d_scaling_factor));

    cufftSafeCall(cufftExecC2C(plan, d_output1, d_output2, CUFFT_INVERSE));
    gpuErrchk(cudaMemcpy(h_output2, d_output2, N*sizeof(float2), cudaMemcpyDeviceToHost));
    for (int i = 0; i < N; i++) printf("Inverse transform - %d - (%f, %f)\n", i, h_output2[i].x, h_output2[i].y);

    cufftSafeCall(cufftDestroy(plan));

    gpuErrchk(cudaFree(d_input));
    gpuErrchk(cudaFree(d_output1));
    gpuErrchk(cudaFree(d_output2));

    return 0;
}
EDIT
The "moment" the callback operation is performed is specified by CUFFT_CB_ST_COMPLEX in the call to cufftXtSetCallback. Notice that you can then have load and store callbacks with the same cuFFT plan.
PERFORMANCE
I'm adding a further comparison between the callback and non-callback versions of the same code, for this particular case of IFFT scaling. The code I'm using is:
#include <stdio.h>
#include <assert.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftXt.h>
#include <thrust/device_vector.h>
#include "Utilities.cuh"
#include "TimingGPU.cuh"
//#define DISPLAY
/*******************************/
/* THRUST FUNCTOR IFFT SCALING */
/*******************************/
class Scale_by_constant
{
private:
    float c_;
public:
    Scale_by_constant(float c) { c_ = c; }
    __host__ __device__ float2 operator()(float2 &a) const
    {
        float2 output;
        output.x = a.x / c_;
        output.y = a.y / c_;
        return output;
    }
};
/**********************************/
/* IFFT SCALING CALLBACK FUNCTION */
/**********************************/
__device__ void IFFT_Scaling(void *dataOut, size_t offset, cufftComplex element, void *callerInfo, void *sharedPtr) {
    float *scaling_factor = (float*)callerInfo;
    float2 output;
    output.x = cuCrealf(element);
    output.y = cuCimagf(element);
    output.x = output.x / scaling_factor[0];
    output.y = output.y / scaling_factor[0];
    ((float2*)dataOut)[offset] = output;
}
__device__ cufftCallbackStoreC d_storeCallbackPtr = IFFT_Scaling;
/********/
/* MAIN */
/********/
int main() {
    const int N = 100000000;

    cufftHandle plan; cufftSafeCall(cufftPlan1d(&plan, N, CUFFT_C2C, 1));

    TimingGPU timerGPU;

    float2 *h_input = (float2*)malloc(N*sizeof(float2));
    float2 *h_output1 = (float2*)malloc(N*sizeof(float2));
    float2 *h_output2 = (float2*)malloc(N*sizeof(float2));

    float2 *d_input;   gpuErrchk(cudaMalloc((void**)&d_input, N*sizeof(float2)));
    float2 *d_output1; gpuErrchk(cudaMalloc((void**)&d_output1, N*sizeof(float2)));
    float2 *d_output2; gpuErrchk(cudaMalloc((void**)&d_output2, N*sizeof(float2)));

    // --- Callback function parameters
    float *h_scaling_factor = (float*)malloc(sizeof(float));
    h_scaling_factor[0] = 16.0f;
    float *d_scaling_factor; gpuErrchk(cudaMalloc((void**)&d_scaling_factor, sizeof(float)));
    gpuErrchk(cudaMemcpy(d_scaling_factor, h_scaling_factor, sizeof(float), cudaMemcpyHostToDevice));

    // --- Initializing the input on the host and moving it to the device
    for (int i = 0; i < N; i++) {
        h_input[i].x = 1.0f;
        h_input[i].y = 0.f;
    }
    gpuErrchk(cudaMemcpy(d_input, h_input, N * sizeof(float2), cudaMemcpyHostToDevice));

    // --- Execute direct FFT on the device and move the results to the host
    cufftSafeCall(cufftExecC2C(plan, d_input, d_output1, CUFFT_FORWARD));
#ifdef DISPLAY
    gpuErrchk(cudaMemcpy(h_output1, d_output1, N * sizeof(float2), cudaMemcpyDeviceToHost));
    for (int i = 0; i < N; i++) printf("Direct transform - %d - (%f, %f)\n", i, h_output1[i].x, h_output1[i].y);
#endif

    // --- Execute inverse FFT with subsequent scaling on the device and move the results to the host
    timerGPU.StartCounter();
    cufftSafeCall(cufftExecC2C(plan, d_output1, d_output2, CUFFT_INVERSE));
    thrust::transform(thrust::device_pointer_cast(d_output2), thrust::device_pointer_cast(d_output2) + N, thrust::device_pointer_cast(d_output2), Scale_by_constant((float)(N)));
#ifdef DISPLAY
    gpuErrchk(cudaMemcpy(h_output2, d_output2, N * sizeof(float2), cudaMemcpyDeviceToHost));
    for (int i = 0; i < N; i++) printf("Inverse transform - %d - (%f, %f)\n", i, h_output2[i].x, h_output2[i].y);
#endif
    printf("Timing NO callback %f\n", timerGPU.GetCounter());

    // --- Setup store callback
    // timerGPU.StartCounter();
    cufftCallbackStoreC h_storeCallbackPtr;
    gpuErrchk(cudaMemcpyFromSymbol(&h_storeCallbackPtr, d_storeCallbackPtr, sizeof(h_storeCallbackPtr)));
    cufftSafeCall(cufftXtSetCallback(plan, (void **)&h_storeCallbackPtr, CUFFT_CB_ST_COMPLEX, (void **)&d_scaling_factor));

    // --- Execute inverse callback FFT on the device and move the results to the host
    timerGPU.StartCounter();
    cufftSafeCall(cufftExecC2C(plan, d_output1, d_output2, CUFFT_INVERSE));
#ifdef DISPLAY
    gpuErrchk(cudaMemcpy(h_output2, d_output2, N * sizeof(float2), cudaMemcpyDeviceToHost));
    for (int i = 0; i < N; i++) printf("Inverse transform - %d - (%f, %f)\n", i, h_output2[i].x, h_output2[i].y);
#endif
    printf("Timing callback %f\n", timerGPU.GetCounter());

    cufftSafeCall(cufftDestroy(plan));

    gpuErrchk(cudaFree(d_input));
    gpuErrchk(cudaFree(d_output1));
    gpuErrchk(cudaFree(d_output2));

    return 0;
}
For such large 1D arrays and simple processing (scaling), the timing on a Kepler K20c is the following
Non-callback 69.029762 ms
Callback 65.868607 ms
So, there is not much improvement. I expect that the improvement one sees is due to avoiding a separate kernel call in the non-callback case. For smaller 1D arrays, there is either no improvement or the non-callback case runs faster.

Hashing 2D, 3D and nD vectors

What are good hashing functions (fast, good distribution, few collisions) for hashing 2D and 3D vectors composed of IEEE 32-bit floats? I assume general 3D vectors, but algorithms assuming normals (components always in [-1,1]) are also welcome. I also do not fear bit manipulation, as IEEE floats are always IEEE floats.
Another, more general, problem is hashing an N-dimensional float vector, where N is quite small (3-12) and constant but not known at compile time. At the moment I simply take the floats' bit patterns as uints and XOR them together, which is probably not the best solution.
There's a spatial hash function described in Optimized Spatial Hashing for Collision Detection of Deformable Objects. They use the hash function

hash(x, y, z) = (x * p1 xor y * p2 xor z * p3) mod n

where p1, p2, p3 are large prime numbers (in our case 73856093, 19349663 and 83492791, respectively), and n is the hash table size.
In the paper, x, y, and z are the discretized coordinates; you could probably also use the binary values of your floats.
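For example, here is a sketch of hashing the raw bit patterns directly (my own illustration; the first three multipliers are the primes from the paper, the fourth is an arbitrary extra prime; note that +0.0f and -0.0f have different bit patterns, so you may want to map -0.0f to +0.0f first):
#include <cstdint>
#include <cstring>

// Reinterpret the IEEE-754 bits of a float in a well-defined way.
inline std::uint32_t float_bits(float f) {
    std::uint32_t u;
    std::memcpy(&u, &f, sizeof u);
    return u;
}

// XOR-combine each component's bits, each mixed with a different prime.
inline std::uint32_t hash_floats(const float *v, int n, std::uint32_t table_size) {
    static const std::uint32_t p[4] = {73856093u, 19349663u, 83492791u, 49979687u};
    std::uint32_t h = 0;
    for (int i = 0; i < n; ++i)
        h ^= float_bits(v[i]) * p[i % 4];
    return h % table_size;
}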
I have two suggestions.
Assume a grid cell of size l and quantize the x, y and z coordinates by computing ix = floor(x/l), iy = floor(y/l), and iz = floor(z/l), where ix, iy and iz are integers. Then use the hash function defined in Optimized Spatial Hashing for Collision Detection of Deformable Objects.
If you don't do the quantization, the hash won't be sensitive to closeness (locality).
Locality-Sensitive Hashing has been mentioned for hashing higher-dimensional vectors, so why not use it for 3D or 2D vectors as well? A variant of LSH adapted for the Euclidean distance metric (which is what we need for 2D and 3D vectors) is called Locality-Sensitive Hashing using p-stable distributions. A very readable tutorial is here.
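To make that concrete, here is a minimal sketch of one such hash, h(v) = floor((a.v + b) / r), for the Euclidean (2-stable) case following Datar et al.; the coefficients a are standard normal and b is uniform in [0, r) (all names and values here are illustrative):
#include <cmath>
#include <cstddef>
#include <random>
#include <vector>

struct PStableHash {
    std::vector<double> a;   // one Gaussian coefficient per dimension
    double b;                // random offset in [0, r)
    double r;                // bucket width: larger r means coarser buckets
    PStableHash(int dim, double r_, std::mt19937 &gen) : a(dim), b(0.0), r(r_) {
        std::normal_distribution<double> gauss(0.0, 1.0);
        std::uniform_real_distribution<double> unif(0.0, r_);
        for (double &ai : a) ai = gauss(gen);
        b = unif(gen);
    }
    long operator()(const std::vector<double> &v) const {
        double dot = 0.0;
        for (std::size_t i = 0; i < a.size(); ++i) dot += a[i] * v[i];
        return (long)std::floor((dot + b) / r);   // nearby points tend to share a bucket
    }
};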
I wrote this in Python based on the comments seen here:
import numpy as np

l = 5
n = 5
p1, p2, p3 = 73856093, 19349663, 83492791

x1 = [33, 4, 11]
x2 = [31, 1, 14]
x3 = [10, 44, 19]

def spatial_hash(x):
    ix, iy, iz = np.floor(x[0]/l), np.floor(x[1]/l), np.floor(x[2]/l)
    return (int(ix*p1) ^ int(iy*p2) ^ int(iz*p3)) % n

print(spatial_hash(x1))
print(spatial_hash(x2))
print(spatial_hash(x3))
It gives
1
1
3
It seemed to work.
In C++:
#include <cstdlib>
#include <iostream>
#include <unordered_map>
#include <vector>
#include <random>
#include <eigen3/Eigen/Dense>
using namespace Eigen;
using namespace std;
const int HASH_SIZE = 200;
//const float MAX = 500.0;
const float L = 0.2f;
const float mmin = -1.f;
const float mmax = 1.f;

// named "grid" rather than "map" to avoid an ambiguity with std::map
unordered_map<int, vector<Vector3d>> grid;

inline size_t hasha(const Vector3d &p) {
    int ix = (unsigned int)((p[0] + 2.f) / L);
    int iy = (unsigned int)((p[1] + 2.f) / L);
    int iz = (unsigned int)((p[2] + 2.f) / L);
    return (unsigned int)((ix * 73856093) ^ (iy * 19349663) ^ (iz * 83492791)) % HASH_SIZE;
}

int main(int argc, char** argv) {
    std::default_random_engine generator;
    std::uniform_real_distribution<double> distribution(-1.0, 1.0);

    for (size_t i = 0; i < 300; i++) {
        float x = distribution(generator);
        float y = distribution(generator);
        float z = distribution(generator);
        Vector3d v(x, y, z);
        std::cout << hasha(v) << " " << v[0] << " " << v[1] << " " << v[2] << std::endl;
        grid[hasha(v)].push_back(v);
        vector<Vector3d> entry = grid[hasha(v)];
        std::cout << "size " << entry.size() << std::endl;
    }

    for (const auto & [ key, value ] : grid) {
        cout << key << std::endl;
        const vector<Vector3d> &v = value;
        for (size_t i = 0; i < v.size(); i++) {
            for (size_t j = 0; j < v.size(); j++) {
                if (i != j) {
                    Vector3d v1 = v[i];
                    Vector3d v2 = v[j];
                    std::cout << " dist " << (v1 - v2).norm() << std::endl;
                }
            }
        }
    }
    return 0;
}
}