Scaling in inverse FFT by cuFFT - matlab

Whenever I plot the values produced by a program using cuFFT and compare them with the Matlab results, I get graphs of the same shape, with the maxima and minima at the same points. However, the values produced by cuFFT are much greater than those produced by Matlab. The Matlab code is
fs = 1000; % sample freq
D = [0:1:4]'; % pulse delay times
t = 0 : 1/fs : 4000/fs; % signal evaluation time
w = 0.5; % width of each pulse
yp = pulstran(t,D,'rectpuls',w);
filt = conj(fliplr(yp));
xx = fft(yp,1024).*fft(filt,1024);
xx = (abs(ifft(xx)));
and the CUDA code, with the same input, is:
cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_FORWARD);
cufftExecC2C(plan, (cufftComplex *)d_filter_signal, (cufftComplex *)d_filter_signal, CUFFT_FORWARD);
ComplexPointwiseMul<<<blocksPerGrid, threadsPerBlock>>>(d_signal, d_filter_signal, NX);
cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_INVERSE);
The cuFFT call also performs a 1024-point FFT, with a batch size of 2.
Even with the scaling factor NX = 1024 applied, the values still do not come out correct. Please tell me what to do.

This is a late answer to remove this question from the unanswered list.
You are not giving enough information to diagnose your problem, since you do not specify how you are setting up the cuFFT plan. You do not even specify whether the Matlab and cuFFT signals have exactly the same shape (so that you only have a scaling issue) or only approximately the same shape. However, let me make the following two observations:
The yp vector has 4001 elements; in contrast, fft(yp,1024) performs an FFT after truncating the signal to 1024 elements;
The inverse cuFFT does not perform the scaling by the number of vector elements.
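As a side note on the second observation, one possibility (a sketch only, since your plan setup and full kernel are not shown) is to fold the 1/NX scaling directly into the pointwise multiplication, replacing your ComplexPointwiseMul with something along these lines:
// Sketch: element-wise product of the two spectra with the 1/NX scaling folded in.
// The kernel and launch parameters are assumptions modelled on the question's code.
__global__ void ComplexPointwiseMulAndScale(cufftComplex *a, const cufftComplex *b, int size, float scale)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) {
        cufftComplex c;
        c.x = (a[i].x * b[i].x - a[i].y * b[i].y) * scale;
        c.y = (a[i].x * b[i].y + a[i].y * b[i].x) * scale;
        a[i] = c;
    }
}
// e.g. ComplexPointwiseMulAndScale<<<blocksPerGrid, threadsPerBlock>>>(d_signal, d_filter_signal, NX, 1.0f / NX);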
For the sake of convenience (it could be useful to other users), I'm reporting below a simple FFT-IFFT scheme that also includes the scaling, performed using the CUDA Thrust library.
#include <cstdio>
#include <cufft.h>
#include <cuComplex.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
/*********************/
/* SCALE BY CONSTANT */
/*********************/
class Scale_by_constant
{
private:
float c_;
public:
Scale_by_constant(float c) { c_ = c; };
__host__ __device__ float2 operator()(float2 &a) const
{
float2 output;
output.x = a.x / c_;
output.y = a.y / c_;
return output;
}
};
int main(void){
const int N=4;
// --- Setting up input device vector
thrust::device_vector<float2> d_vec(N,make_cuComplex(1.f,2.f));
cufftHandle plan;
cufftPlan1d(&plan, N, CUFFT_C2C, 1);
// --- Perform in-place direct Fourier transform
cufftExecC2C(plan, thrust::raw_pointer_cast(d_vec.data()),thrust::raw_pointer_cast(d_vec.data()), CUFFT_FORWARD);
// --- Perform in-place inverse Fourier transform
cufftExecC2C(plan, thrust::raw_pointer_cast(d_vec.data()),thrust::raw_pointer_cast(d_vec.data()), CUFFT_INVERSE);
thrust::transform(d_vec.begin(), d_vec.end(), d_vec.begin(), Scale_by_constant((float)(N)));
// --- Setting up output host vector
thrust::host_vector<float2> h_vec(d_vec);
for (int i=0; i<N; i++) printf("Element #%i; Real part = %f; Imaginary part: %f\n",i,h_vec[i].x,h_vec[i].y);
cufftDestroy(plan);
getchar();
return 0;
}

With the introduction of the cuFFT callback feature, the normalization required by the inverse FFT performed by the cuFFT can be embedded directly within the cufftExecC2C call by defining the normalization operation as a __device__ function.
Besides the cuFFT User Guide, for the cuFFT callback features, see
CUDA Pro Tip: Use cuFFT Callbacks for Custom Data Processing
Below is an example of implementing the IFFT normalization by cuFFT callback.
#include <stdio.h>
#include <assert.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftXt.h>
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*********************/
/* CUFFT ERROR CHECK */
/*********************/
// See http://stackoverflow.com/questions/16267149/cufft-error-handling
#ifdef _CUFFT_H_
static const char *_cudaGetErrorEnum(cufftResult error)
{
switch (error)
{
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
}
return "<unknown>";
}
#endif
#define cufftSafeCall(err) __cufftSafeCall(err, __FILE__, __LINE__)
inline void __cufftSafeCall(cufftResult err, const char *file, const int line)
{
if( CUFFT_SUCCESS != err) {
fprintf(stderr, "CUFFT error in file '%s', line %d\nerror %d: %s\nterminating!\n", file, line, err, _cudaGetErrorEnum(err));
cudaDeviceReset(); assert(0);
}
}
__device__ void IFFT_Scaling(void *dataOut, size_t offset, cufftComplex element, void *callerInfo, void *sharedPtr) {
float *scaling_factor = (float*)callerInfo;
float2 output;
output.x = cuCrealf(element);
output.y = cuCimagf(element);
output.x = output.x / scaling_factor[0];
output.y = output.y / scaling_factor[0];
((float2*)dataOut)[offset] = output;
}
__device__ cufftCallbackStoreC d_storeCallbackPtr = IFFT_Scaling;
/********/
/* MAIN */
/********/
int main() {
const int N = 16;
cufftHandle plan;
float2 *h_input = (float2*)malloc(N*sizeof(float2));
float2 *h_output1 = (float2*)malloc(N*sizeof(float2));
float2 *h_output2 = (float2*)malloc(N*sizeof(float2));
float2 *d_input; gpuErrchk(cudaMalloc((void**)&d_input, N*sizeof(float2)));
float2 *d_output1; gpuErrchk(cudaMalloc((void**)&d_output1, N*sizeof(float2)));
float2 *d_output2; gpuErrchk(cudaMalloc((void**)&d_output2, N*sizeof(float2)));
// --- The scaling factor passed to the callback must equal the transform length N
float *h_scaling_factor = (float*)malloc(sizeof(float));
h_scaling_factor[0] = (float)N;
float *d_scaling_factor; gpuErrchk(cudaMalloc((void**)&d_scaling_factor, sizeof(float)));
gpuErrchk(cudaMemcpy(d_scaling_factor, h_scaling_factor, sizeof(float), cudaMemcpyHostToDevice));
for (int i=0; i<N; i++) {
h_input[i].x = 1.0f;
h_input[i].y = 0.f;
}
gpuErrchk(cudaMemcpy(d_input, h_input, N*sizeof(float2), cudaMemcpyHostToDevice));
cufftSafeCall(cufftPlan1d(&plan, N, CUFFT_C2C, 1));
cufftSafeCall(cufftExecC2C(plan, d_input, d_output1, CUFFT_FORWARD));
gpuErrchk(cudaMemcpy(h_output1, d_output1, N*sizeof(float2), cudaMemcpyDeviceToHost));
for (int i=0; i<N; i++) printf("Direct transform - %d - (%f, %f)\n", i, h_output1[i].x, h_output1[i].y);
cufftCallbackStoreC h_storeCallbackPtr;
gpuErrchk(cudaMemcpyFromSymbol(&h_storeCallbackPtr, d_storeCallbackPtr, sizeof(h_storeCallbackPtr)));
cufftSafeCall(cufftXtSetCallback(plan, (void **)&h_storeCallbackPtr, CUFFT_CB_ST_COMPLEX, (void **)&d_scaling_factor));
cufftSafeCall(cufftExecC2C(plan, d_output1, d_output2, CUFFT_INVERSE));
gpuErrchk(cudaMemcpy(h_output2, d_output2, N*sizeof(float2), cudaMemcpyDeviceToHost));
for (int i=0; i<N; i++) printf("Inverse transform - %d - (%f, %f)\n", i, h_output2[i].x, h_output2[i].y);
cufftSafeCall(cufftDestroy(plan));
gpuErrchk(cudaFree(d_input));
gpuErrchk(cudaFree(d_output1));
gpuErrchk(cudaFree(d_output2));
return 0;
}
EDIT
The "moment" the callback operation is performed is specified by CUFFT_CB_ST_COMPLEX in the call to cufftXtSetCallback. Notice that you can then have load and store callbacks with the same cuFFT plan.
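For example (a sketch only, mirroring the store-callback setup above), a load callback could be attached to the same plan with CUFFT_CB_LD_COMPLEX; the pre-processing performed here (conjugating the input) is just an illustrative assumption:
// Illustrative load callback: returns the conjugate of each input element before the transform
__device__ cufftComplex Load_Conjugate(void *dataIn, size_t offset, void *callerInfo, void *sharedPtr) {
    cufftComplex element = ((cufftComplex*)dataIn)[offset];
    element.y = -element.y;
    return element;
}
__device__ cufftCallbackLoadC d_loadCallbackPtr = Load_Conjugate;
// ... host side, analogous to the store callback above:
// cufftCallbackLoadC h_loadCallbackPtr;
// gpuErrchk(cudaMemcpyFromSymbol(&h_loadCallbackPtr, d_loadCallbackPtr, sizeof(h_loadCallbackPtr)));
// cufftSafeCall(cufftXtSetCallback(plan, (void **)&h_loadCallbackPtr, CUFFT_CB_LD_COMPLEX, NULL));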

PERFORMANCE
I'm adding a further answer to compare the callback performance with the non-callback version of the same code for this particular case of IFFT scaling. The code I'm using is
#include <stdio.h>
#include <assert.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftXt.h>
#include <thrust/device_vector.h>
#include "Utilities.cuh"
#include "TimingGPU.cuh"
//#define DISPLAY
/*******************************/
/* THRUST FUNCTOR IFFT SCALING */
/*******************************/
class Scale_by_constant
{
private:
float c_;
public:
Scale_by_constant(float c) { c_ = c; };
__host__ __device__ float2 operator()(float2 &a) const
{
float2 output;
output.x = a.x / c_;
output.y = a.y / c_;
return output;
}
};
/**********************************/
/* IFFT SCALING CALLBACK FUNCTION */
/**********************************/
__device__ void IFFT_Scaling(void *dataOut, size_t offset, cufftComplex element, void *callerInfo, void *sharedPtr) {
float *scaling_factor = (float*)callerInfo;
float2 output;
output.x = cuCrealf(element);
output.y = cuCimagf(element);
output.x = output.x / scaling_factor[0];
output.y = output.y / scaling_factor[0];
((float2*)dataOut)[offset] = output;
}
__device__ cufftCallbackStoreC d_storeCallbackPtr = IFFT_Scaling;
/********/
/* MAIN */
/********/
int main() {
const int N = 100000000;
cufftHandle plan; cufftSafeCall(cufftPlan1d(&plan, N, CUFFT_C2C, 1));
TimingGPU timerGPU;
float2 *h_input = (float2*)malloc(N*sizeof(float2));
float2 *h_output1 = (float2*)malloc(N*sizeof(float2));
float2 *h_output2 = (float2*)malloc(N*sizeof(float2));
float2 *d_input; gpuErrchk(cudaMalloc((void**)&d_input, N*sizeof(float2)));
float2 *d_output1; gpuErrchk(cudaMalloc((void**)&d_output1, N*sizeof(float2)));
float2 *d_output2; gpuErrchk(cudaMalloc((void**)&d_output2, N*sizeof(float2)));
// --- Callback function parameters
float *h_scaling_factor = (float*)malloc(sizeof(float));
h_scaling_factor[0] = (float)N; // scale by the transform length
float *d_scaling_factor; gpuErrchk(cudaMalloc((void**)&d_scaling_factor, sizeof(float)));
gpuErrchk(cudaMemcpy(d_scaling_factor, h_scaling_factor, sizeof(float), cudaMemcpyHostToDevice));
// --- Initializing the input on the host and moving it to the device
for (int i = 0; i < N; i++) {
h_input[i].x = 1.0f;
h_input[i].y = 0.f;
}
gpuErrchk(cudaMemcpy(d_input, h_input, N * sizeof(float2), cudaMemcpyHostToDevice));
// --- Execute direct FFT on the device and move the results to the host
cufftSafeCall(cufftExecC2C(plan, d_input, d_output1, CUFFT_FORWARD));
#ifdef DISPLAY
gpuErrchk(cudaMemcpy(h_output1, d_output1, N * sizeof(float2), cudaMemcpyDeviceToHost));
for (int i=0; i<N; i++) printf("Direct transform - %d - (%f, %f)\n", i, h_output1[i].x, h_output1[i].y);
#endif
// --- Execute inverse FFT with subsequent scaling on the device and move the results to the host
timerGPU.StartCounter();
cufftSafeCall(cufftExecC2C(plan, d_output1, d_output2, CUFFT_INVERSE));
thrust::transform(thrust::device_pointer_cast(d_output2), thrust::device_pointer_cast(d_output2) + N, thrust::device_pointer_cast(d_output2), Scale_by_constant((float)(N)));
#ifdef DISPLAY
gpuErrchk(cudaMemcpy(h_output2, d_output2, N * sizeof(float2), cudaMemcpyDeviceToHost));
for (int i=0; i<N; i++) printf("Inverse transform - %d - (%f, %f)\n", i, h_output2[i].x, h_output2[i].y);
#endif
printf("Timing NO callback %f\n", timerGPU.GetCounter());
// --- Setup store callback
// timerGPU.StartCounter();
cufftCallbackStoreC h_storeCallbackPtr;
gpuErrchk(cudaMemcpyFromSymbol(&h_storeCallbackPtr, d_storeCallbackPtr, sizeof(h_storeCallbackPtr)));
cufftSafeCall(cufftXtSetCallback(plan, (void **)&h_storeCallbackPtr, CUFFT_CB_ST_COMPLEX, (void **)&d_scaling_factor));
// --- Execute inverse callback FFT on the device and move the results to the host
timerGPU.StartCounter();
cufftSafeCall(cufftExecC2C(plan, d_output1, d_output2, CUFFT_INVERSE));
#ifdef DISPLAY
gpuErrchk(cudaMemcpy(h_output2, d_output2, N * sizeof(float2), cudaMemcpyDeviceToHost));
for (int i=0; i<N; i++) printf("Inverse transform - %d - (%f, %f)\n", i, h_output2[i].x, h_output2[i].y);
#endif
printf("Timing callback %f\n", timerGPU.GetCounter());
cufftSafeCall(cufftDestroy(plan));
gpuErrchk(cudaFree(d_input));
gpuErrchk(cudaFree(d_output1));
gpuErrchk(cudaFree(d_output2));
return 0;
}
For such large 1D arrays and simple processing (scaling), the timing on a Kepler K20c is the following
Non-callback 69.029762 ms
Callback 65.868607 ms
So, there is not much improvement. I expect that the improvement one sees is due to avoiding a separate kernel call in the non-callback case. For smaller 1D arrays, there is either no improvement or the non-callback case runs faster.

Related

How to access the results of a Magma Routine

I am trying to access the results of the eigenvalue decomposition of a general real matrix, using the magma_sgeev routine. My code is as follows:
#include <cstdlib>
#include <iostream>
#include <map>
#include <typeindex>
#include <typeinfo>
#include <magma_v2.h>
#include <random>
#define N 10
#define LDA N
#define LDVL N
#define LDVR N
/* Main program */
int main()
{
magma_init();
/* Locals */
int n = N, lda = LDA, ldvl = LDVL, ldvr = LDVR, info, lwork;
float wkopt;
float *work;
/* Local arrays */
float wr[N], wi[N], vl[LDVL * N], vr[LDVR * N];
float *a = (float *)malloc(LDA * N * sizeof(float));
for (int i = 0; i < LDA * N; i++)
a[i] = rand();
printf("Done populating matrix\n");
lwork = -1;
magma_sgeev_m(MagmaNoVec, MagmaNoVec, n, a, lda, wr, wi, vl, ldvl, vr, ldvr,
work, lwork, &info);
lwork = (int)work[0];
work = (float *)malloc(lwork * sizeof(float));
printf("%d\n", lwork);
magma_sgeev_m(MagmaNoVec, MagmaNoVec, n, a, lda, wr, wi, vl, ldvl, vr, ldvr,
work, lwork, &info);
for (int i = 0; i < N; i++)
printf("%f ", wr[i]);
std::cout << std::endl;
for (int i = 0; i < N; i++)
printf("%f ", wi[i]);
printf("\n");
if (info > 0)
{
printf("The algorithm failed to compute eigenvalues.\n");
exit(1);
}
exit(0);
magma_finalize();
}
As the documentation says, the arrays wr and wi hold the results of the computation. However, there are two issues with this code:
When I try to access wr and wi, I get a segmentation fault. I am not aware of the inner workings of the MAGMA library; what am I missing, and how do I fix it?
When I increase the size of the matrix, for example from 5x5 to 10x10, I get a segmentation fault in the first call to magma_sgeev itself. What am I missing?
TIA
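One thing worth checking (assuming magma_sgeev_m follows the usual LAPACK workspace-query convention): when lwork = -1, the routine writes the optimal workspace size into the first element of the work argument, so work must point to valid memory even during the query call; the already-declared but unused wkopt can serve that purpose:
/* Workspace query: pass &wkopt so the optimal size can be written somewhere valid */
lwork = -1;
magma_sgeev_m(MagmaNoVec, MagmaNoVec, n, a, lda, wr, wi, vl, ldvl, vr, ldvr,
              &wkopt, lwork, &info);
lwork = (int)wkopt;
work = (float *)malloc(lwork * sizeof(float));
/* Actual computation with the allocated workspace */
magma_sgeev_m(MagmaNoVec, MagmaNoVec, n, a, lda, wr, wi, vl, ldvl, vr, ldvr,
              work, lwork, &info);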

Recursive use of a self-implemented cuIDCT.cu leads to changing output every time the code is re-run

I have implemented a CUDA version of the inverse discrete cosine transform (IDCT) by "translating" the MATLAB built-in function idct.m into CUDA:
My implementation, cuIDCT.cu, works when m = n and both m and n are even numbers.
cuIDCT.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cufft.h>
#include <cuComplex.h>
// round up n/m
inline int iDivUp(int n, int m)
{
return (n + m - 1) / m;
}
typedef cufftComplex complex;
#define PI 3.1415926535897932384626433832795028841971693993751
__global__
void idct_ComputeWeightsKernel(const int n, complex *ww)
{
const int pos = threadIdx.x + blockIdx.x * blockDim.x;
if (pos >= n) return;
ww[pos].x = sqrtf(2*n) * cosf(pos*PI/(2*n));
ww[pos].y = sqrtf(2*n) * sinf(pos*PI/(2*n));
}
__global__
void idct_ComputeEvenKernel(const float *b, const int n, const int m, complex *ww, complex *y)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= n || iy >= m) return;
const int pos = ix + iy*n;
// Compute precorrection factor
ww[0].x = ww[0].x / sqrtf(2);
ww[0].y = ww[0].y / sqrtf(2);
y[iy + ix*m].x = ww[iy].x * b[pos];
y[iy + ix*m].y = ww[iy].y * b[pos];
}
__global__
void Reordering_a0_Kernel(complex *y, const int n, const int m, complex *yy)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= n || iy >= m) return;
const int pos = ix + iy*n;
yy[iy + ix*n].x = y[pos].x / (float) n;
yy[iy + ix*n].y = y[pos].y / (float) n;
}
__global__
void Reordering_a_Kernel(complex *yy, const int n, const int m, float *a)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= n || iy >= m) return;
const int pos = ix + iy*n;
// Re-order elements of each column according to equations (5.93) and (5.94) in Jain
if (iy < n/2)
{
a[ix + 2*iy*n] = yy[pos].x;
a[ix + (2*iy+1)*n] = yy[ix + (m-iy-1)*n].x;
}
}
/**
* a = idct(b), where a is of size [n m].
* @param b, input array
* @param n, first dimension of a
* @param m, second dimension of a
* @param a, output array
*/
void cuIDCT(float *h_in, int n, int m, float *h_out) // a is of size [n m]
{
const int data_size = n * m * sizeof(float);
// device memory allocation
float *d_in, *d_out;
cudaMalloc(&d_in, data_size);
cudaMalloc(&d_out, data_size);
// transfer data from host to device
cudaMemcpy(d_in, h_in, data_size, cudaMemcpyHostToDevice);
// compute IDCT using CUDA
// begin============================================
// Compute weights
complex *ww;
cudaMalloc(&ww, n*sizeof(complex));
dim3 threads(256);
dim3 blocks(iDivUp(n, threads.x));
idct_ComputeWeightsKernel<<<blocks, threads>>>(n, ww);
complex *y;
complex *yy;
cufftHandle plan;
dim3 threads1(32, 6);
dim3 blocks2(iDivUp(n, threads1.x), iDivUp(m, threads1.y)); // for even case
int Length[1] = {m}; // for each IFFT, the length is m
cudaMalloc(&y, n*m*sizeof(complex));
idct_ComputeEvenKernel<<<blocks2, threads1>>>(d_in, n, m, ww, y);
cufftPlanMany(&plan, 1, Length,
Length, 1, m,
Length, 1, m, CUFFT_C2C, n);
cufftExecC2C(plan, y, y, CUFFT_INVERSE); // y is of size [n m]
cudaMalloc(&yy, n*m*sizeof(complex));
Reordering_a0_Kernel<<<blocks2, threads1>>>(y, n, m, yy);
Reordering_a_Kernel<<<blocks2, threads1>>>(yy, n, m, d_out);
// end============================================
// transfer result from device to host
cudaMemcpy(h_out, d_out, data_size, cudaMemcpyDeviceToHost);
// cleanup
cufftDestroy(plan);
cudaFree(ww);
cudaFree(y);
cudaFree(yy);
cudaFree(d_in);
cudaFree(d_out);
}
Then I compared the result of my CUDA IDCT (i.e. cuIDCT.cu) against MATLAB idct.m using the following code:
a test main.cpp function, and
a MATLAB main function main.m to read the result from CUDA and compare it against MATLAB.
main.cpp
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <helper_functions.h>
#include <stdlib.h>
#include <stdio.h>
// N must equal M, and both must be even numbers
#define N 256
#define M 256
void WriteDataFile(const char *name, int w, int h, const float *in, const float *out)
{
FILE *stream;
stream = fopen(name, "wb");
float data = 202021.25f;
fwrite(&data, sizeof(float), 1, stream);
fwrite(&w, sizeof(w), 1, stream);
fwrite(&h, sizeof(h), 1, stream);
for (int i = 0; i < h; i++)
for (int j = 0; j < w; j++)
{
const int pos = j + i * h;
fwrite(in + pos, sizeof(float), 1, stream);
fwrite(out + pos, sizeof(float), 1, stream);
}
fclose(stream);
}
void cuIDCT(float *b, int n, int m, float *a);
int main()
{
// host memory allocation
float *h_in = new float [N * M];
float *h_out = new float [N * M];
float *h_temp = new float [N * M];
// input data initialization
for (int i = 0; i < N * M; i++)
{
h_in[i] = (float)rand()/(float)RAND_MAX;
h_out[i] = h_in[i];
h_temp[i] = h_in[i];
}
// please comment either one case for testing
// test case 1: use cuIDCT.cu once
// cuIDCT(h_in, N, M, h_out);
// test case 2: iteratively use cuIDCT.cu
for (int i = 0; i < 4; i++)
{
if (i % 2 == 0)
cuIDCT(h_out, N, M, h_temp);
else
cuIDCT(h_temp, N, M, h_out);
}
// write data, for further visualization using MATLAB
WriteDataFile("test.flo", N, M, h_in, h_out);
// cleanup
delete [] h_in;
delete [] h_out;
delete [] h_temp;
cudaDeviceReset();
}
main.m
clc;clear;
% read
[h_in, h_out] = read_data('test.flo');
% MATLAB result, for test case 1, comment the for-loop
matlab_out = h_in;
for i = 1:4
matlab_out = idct(matlab_out);
end
% compare
err = matlab_out - h_out;
% show
figure(1);
subplot(221); imshow(h_in, []); title('h\_in'); colorbar
subplot(222); imshow(h_out, []); title('h\_out'); colorbar
subplot(223); imshow(matlab_out, []); title('matlab\_out'); colorbar
subplot(224); imshow(err, []); title('error map'); colorbar
disp(['maximum error between CUDA and MATLAB is ' ...
num2str(max(max(abs(err))))])
I ran the code on Visual Studio 11 (i.e. VS2012) in Windows 7 with Nvidia GPU Tesla K20c, using CUDA Toolkit version 7.5, and my MATLAB version is R2015b.
My test steps:
For test case 1: un-comment test case 1 and comment test case 2. Then:
1. Run main.cpp.
2. Run main.m in MATLAB.
3. Repeat step 1 and step 2 (without any change, just re-run the code).
I repeated step 3 twenty times. The output is unchanged, and the results in main.m are:
results of test case 1
The maximum error is 7.7152e-07.
For test case 2: un-comment test case 2 and comment test case 1. Then:
1. Run main.cpp.
2. Run main.m in MATLAB.
3. Repeat step 1 and step 2 (without any change, just re-run the code).
I repeated step 3 twenty times. The output changes between runs, and the results in main.m are (not enough reputation to post all images, only the wrong case is shown below):
one situation (the wrong one) of test case 2
The maximum error is 0.45341 (2 times), 0.44898 (1 time), 0.26186 (1 time), 0.26301 (1 time), and 9.5716e-07 (15 times).
From the test results, my conclusions are:
From test case 1: cuIDCT.cu is numerically correct (error ~10^-7) with respect to idct.m.
From test case 2: repeatedly applying cuIDCT.cu leads to unstable results (i.e. the output changes every time the code is re-run, and is sometimes numerically wrong, error ~0.1).
My question:
From test case 1 we know cuIDCT.cu is numerically correct with respect to idct.m. But why does repeatedly applying cuIDCT.cu lead to a different output each time the code is re-run?
Any help or suggestions are highly appreciated.
I believe the variability in your results is coming about due to this code in your idct_ComputeEvenKernel:
// Compute precorrection factor
ww[0].x = ww[0].x / sqrtf(2);
ww[0].y = ww[0].y / sqrtf(2);
It's not entirely clear what your intent is here, but it's doubtful that this code could be doing what you want. You may be confused about the CUDA execution model.
The above code will be executed by every CUDA thread that you launch for that kernel that passes the thread check:
if (ix >= n || iy >= m) return;
I believe this means 65536 threads will all execute this code in that kernel. Furthermore, the threads will execute that code in more-or-less any order (not all CUDA threads execute in lock-step). They may even step on each other as they are trying to write out their values to the location ww[0]. So the final result in ww[0] will be quite unpredictable.
When I comment out those lines of code, the results become stable for me (albeit different from what they were with those lines in place), unchanging from run to run.
I'd like to point something else out. Wherever you are calculating the .x and .y values of a complex quantity, my suggestion would be to rework the code from this (for example):
y[iy + ix*m].x = ww[iy].x * b[pos];
y[iy + ix*m].y = ww[iy].y * b[pos];
to this:
complex temp1, temp2;
temp1 = ww[iy];
temp2.x = temp1.x * b[pos];
temp2.y = temp1.y * b[pos];
y[iy + ix*m] = temp2;
At least according to my testing, the compiler doesn't seem to be making this optimization for you, and one side-effect benefit is that it's much easier to test your code with cuda-memcheck --tool initcheck .... In the first realization, the compiler will load y[iy + ix*m] as an 8 byte quantity, modify either 4 or 8 bytes of it, then store y[iy + ix*m] as an 8 byte quantity. The second realization should be more efficient (it eliminates the load of y[]), and eliminates the load of an uninitialized quantity (y[]), which the cuda-memcheck tool will report as a hazard.
This variability I'm describing should be possible whether you run the 1-pass version of your code or the 4-pass version. Therefore I think your statements about the 1-pass version being correct are suspect. I think if you run the 1-pass version enough, you will eventually see variability (although it may require varying initial memory conditions, or running on different GPU types). Even in your own results, we see that 15 out of 20 runs of the 4-pass code produce "correct" results, i.e. the residual error is ~1e-7.
Here's my modified cuIDCT.cu file, based on the version you posted here. The assumption I'm making below is that you wanted to compute the scaling on ww[0] only once, in which case we can easily handle that arithmetic as an addendum to the previous idct_ComputeWeightsKernel:
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cufft.h>
#include <cuComplex.h>
#include <helper_cuda.h>
#include "assert.h"
// round up n/m
inline int iDivUp(int n, int m)
{
return (n + m - 1) / m;
}
typedef cufftComplex complex;
#define PI 3.1415926535897932384626433832795028841971693993751
#define cufftSafeCall(err) __cufftSafeCall(err, __FILE__, __LINE__)
inline void __cufftSafeCall(cufftResult err, const char *file, const int line)
{
if( CUFFT_SUCCESS != err) {
fprintf(stderr, "CUFFT error in file '%s', line %d\nerror %d: %s\nterminating!\n", file, line, err, _cudaGetErrorEnum(err));
cudaDeviceReset(); assert(0);
}
}
__global__
void idct_ComputeWeightsKernel(const int n, complex *ww)
{
const int pos = threadIdx.x + blockIdx.x * blockDim.x;
if (pos >= n) return;
complex temp;
temp.x = sqrtf(2*n) * cosf(pos*PI/(2*n));
temp.y = sqrtf(2*n) * sinf(pos*PI/(2*n));
if (pos == 0) {
temp.x /= sqrtf(2);
temp.y /= sqrtf(2);}
ww[pos] = temp;
}
__global__
void idct_ComputeEvenKernel(const float *b, const int n, const int m, complex *ww, complex *y)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= n || iy >= m) return;
const int pos = ix + iy*n;
/* handle this in idct_ComputeWeightsKernel
// Compute precorrection factor
ww[0].x = ww[0].x / sqrtf(2);
ww[0].y = ww[0].y / sqrtf(2);
*/
complex temp1, temp2;
temp1 = ww[iy];
temp2.x = temp1.x * b[pos];
temp2.y = temp1.y * b[pos];
y[iy + ix*m] = temp2;
}
__global__
void Reordering_a0_Kernel(complex *y, const int n, const int m, complex *yy)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= n || iy >= m) return;
const int pos = ix + iy*n;
complex temp1, temp2;
temp1 = y[pos];
temp2.x = temp1.x / (float) n;
temp2.y = temp1.y / (float) n;
yy[iy + ix*n] = temp2;
}
__global__
void Reordering_a_Kernel(complex *yy, const int n, const int m, float *a)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= n || iy >= m) return;
const int pos = ix + iy*n;
// Re-order elements of each column according to equations (5.93) and (5.94) in Jain
if (iy < n/2)
{
a[ix + 2*iy*n] = yy[pos].x;
a[ix + (2*iy+1)*n] = yy[ix + (m-iy-1)*n].x;
}
}
/**
* a = idct(b), where a is of size [n m].
* @param b, input array
* @param n, first dimension of a
* @param m, second dimension of a
* @param a, output array
*/
void cuIDCT(float *h_in, int n, int m, float *h_out) // a is of size [n m]
{
const int data_size = n * m * sizeof(float);
// device memory allocation
float *d_in, *d_out;
checkCudaErrors(cudaMalloc(&d_in, data_size));
checkCudaErrors(cudaMalloc(&d_out, data_size));
// transfer data from host to device
checkCudaErrors(cudaMemcpy(d_in, h_in, data_size, cudaMemcpyHostToDevice));
// compute IDCT using CUDA
// begin============================================
// Compute weights
complex *ww;
checkCudaErrors(cudaMalloc(&ww, n*sizeof(complex)));
dim3 threads(256);
dim3 blocks(iDivUp(n, threads.x));
idct_ComputeWeightsKernel<<<blocks, threads>>>(n, ww);
complex *y;
complex *yy;
cufftHandle plan;
dim3 threads1(32, 6);
dim3 blocks2(iDivUp(n, threads1.x), iDivUp(m, threads1.y)); // for even case
int Length[1] = {m}; // for each IFFT, the length is m
checkCudaErrors(cudaMalloc(&y, n*m*sizeof(complex)));
idct_ComputeEvenKernel<<<blocks2, threads1>>>(d_in, n, m, ww, y);
cufftSafeCall(cufftPlanMany(&plan, 1, Length,
Length, 1, m,
Length, 1, m, CUFFT_C2C, n));
cufftSafeCall(cufftExecC2C(plan, y, y, CUFFT_INVERSE)); // y is of size [n m]
checkCudaErrors(cudaMalloc(&yy, n*m*sizeof(complex)));
Reordering_a0_Kernel<<<blocks2, threads1>>>(y, n, m, yy);
cudaMemset(d_out, 0, data_size);
Reordering_a_Kernel<<<blocks2, threads1>>>(yy, n, m, d_out);
// end============================================
// transfer result from device to host
checkCudaErrors(cudaMemcpy(h_out, d_out, data_size, cudaMemcpyDeviceToHost));
// cleanup
cufftDestroy(plan);
checkCudaErrors(cudaFree(ww));
checkCudaErrors(cudaFree(y));
checkCudaErrors(cudaFree(yy));
checkCudaErrors(cudaFree(d_in));
checkCudaErrors(cudaFree(d_out));
}
You'll note I threw an extra cudaMemset on d_out in there, because it helped me clean up an issue with cuda-memcheck --tool initcheck .... It shouldn't be necessary; you can delete it if you want.

Transformation of coordinates, Concept

I want to convert x,y,z coordinates to polar (azimuth/elevation/range) coordinates. I am getting a minus sign in the y coordinates. Can someone explain why I am getting it? It would be a great help.
I am reading these values (xyz, az_elev_r) from a piece of software and they can't be changed. I am just not sure of the order of the angles (az & elev). Using my code I get -y instead of y, which means there is a 180-degree rotation. My code is:
xyz=[-0.564 3.689 -0.735;
2.011 5.067 -1.031;
-1.181 3.943 -1.825; % Reference values
];
%% az_elev_r-->xyz
az_elev_r=[ 261.30 -11.24 3.80;
291.65 -10.692 5.548;
253.34 -23.897 4.50]; % Also Reference (degree)
az_elev_r(:,1:2)=deg2rad(az_elev_r(:,1:2));
r=az_elev_r(:,3);
az=az_elev_r(:,1);
elev=az_elev_r(:,2);
x=r.*cos(az).*cos(elev)
y=r.*sin(az).*cos(elev)
z=r.*sin(elev)
Your az_elev_r matrix is not consistent with your xyz reference.
>> [az, el, r] = cart2sph(xyz(:,1), xyz(:,2), xyz(:,3));
>> rad2deg(az)
ans =
98.6924675475501
68.3527736950233
106.673911589314
Your answers are consistent with the values returned by the sph2cart function. (The example starts with your original input, before the deg2rad replacement.)
>> [x, y, z] = sph2cart(deg2rad(az_elev_r(:,1)), deg2rad(az_elev_r(:,2)), az_elev_r(:,3))
x =
-0.563766229670505
2.01131973806906
-1.17951822049783
y =
-3.68422880893852
-5.06709019311118
-3.94153436658676
z =
-0.740692730942158
-1.02931719412937
-1.82292172199717
Incidentally, your code will be more readable if you just use the sph2cart function and work in radians, unless you are trying to understand the conversions for their own sake.
OpenCV has code for conversion to polar coordinates and back. This conversion is useful for finding object rotation through correlation, or otherwise creating an object-centred, 'rotation-independent' representation of objects. It is useful to visualize each of the polar coordinates as well as their joint image. The images below should be self-explanatory. The polar plot has angle as the horizontal axis and radius as the vertical axis, so that the 4 peaks correspond to the 4 corners of the input image. The code (C++ with OpenCV) is attached.
//================================
// Name : PolarCoord.cpp
// Author : V.Ivanchenko cudassimo@gmail.com
// Version :
// Copyright : Your copyright notice
// Description : Hello World in C++, Ansi-style
//======================================
#include <iostream>
#include <limits>
#include "opencv.hpp"
using namespace std;
using namespace cv;
#define VALID(x, y, w, h) ((x)>=0 && (y)>=0 && (x)<(w) && (y)<(h)) // validates index
/*
* 1. Original binary image HxW CV_8U
* |
* |
* V
* 2. Two coordinate Mats HxW CV_32F
* |
* |
* V
* 3. Visualization CV_8U
* a. gray HxW for a single coordinate image
* b. binary Rx360 for two coordinate images
*/
// convert a binary 2D image into two Mats with float coordinates
void imageToCoord(const Mat& img, Mat& X, Mat& Y, bool centered = true) {
if (img.empty())
return;
int h = img.rows;
int w = img.cols;
X.create(h, w, CV_32F);
Y.create(h, w, CV_32F);
float Cx = w/2.0f;
float Cy = h/2.0f;
for (int i=0; i<h; ++i){
const uchar* img_row = img.ptr<uchar>(i);
float* x_row = X.ptr<float>(i);
float* y_row = Y.ptr<float>(i);
for (int j=0; j<w; ++j) {
if (img_row[j]>0) {
float x = j;
float y = i;
if (centered) {
x-=Cx;
y-=Cy;
}
x_row[j] = x;
y_row[j] = y;
}
} // j
} // i
} //imageToCoord()
// convert a single float polar coord Mat to a gray image
void polarToImg(const Mat& PolarCoord, Mat& img) {
if (PolarCoord.empty())
return;
int h = PolarCoord.rows;
int w = PolarCoord.cols;
img.create(h, w, CV_8U);
float maxVal = std::numeric_limits<float>::min();
// find maxVal
for (int i=0; i<h; ++i){
const float* x_row = PolarCoord.ptr<float>(i);
for (int j=0; j<w; ++j) {
if (maxVal < x_row[j])
maxVal = x_row[j];
} // j
} // i
// create an image
if (maxVal>0) {
float k = 255.0/maxVal;
for (int i=0; i<h; ++i){
uchar* img_row = img.ptr<uchar>(i);
const float* x_row = PolarCoord.ptr<float>(i);
for (int j=0; j<w; ++j) {
img_row[j] = saturate_cast<uchar>(k*x_row[j]);
}// j
} // i
} // if
} // polarToImg()
// convert two polar coord Mats to a binary image
void polarToImg(const Mat& radius, const Mat& angle, Mat& img) {
if (angle.empty() || radius.empty())
return;
int h = angle.rows;
int w = angle.cols;
assert(radius.cols==w && radius.rows==h);
const int imgH = sqrt(h*h+w*w)+0.5f; // radius
const int imgW = 360; // angle, deg
img.create(imgH, imgW, CV_8U);
// create an image
for (int i=0; i<h; ++i){
const float* ang_row = angle.ptr<float>(i);
const float* r_row = radius.ptr<float>(i);
for (int j=0; j<w; ++j) {
int x = ang_row[j] + 0.5f;
int y = r_row[j] + 0.5f;
if (x>0) {
cout<<x<<endl;
}
if (VALID(x, y, imgW, imgH))
img.at<uchar>(y, x) = 255;
else {
cout<<"Invalid x, y: "<<x<<", "<<y<<endl;
}
}// j
} // i
} // polarToImg()
int main() {
cout << "Cartesian to polar" << endl; // prints "Cartesian to polar"
const int W=400, H=400;
Mat Minput(H, W, CV_8U);
Minput(Rect(W/4, H/4, W/2, H/2)) = 255;
Mat X, Y, Angle, Radius, Mr, Mang, Mpolar;
// processing
imageToCoord(Minput, X, Y); // extract coordinates
cartToPolar(X, Y, Radius, Angle, true);// convert coordiantes
// visualize
polarToImg(Radius, Mr);
polarToImg(Angle, Mang);
polarToImg(Radius, Angle, Mpolar);
// debug
//cout<<Mpolar<<endl;
namedWindow("input", 0);
namedWindow("angle", 0);
namedWindow("radius", 0);
namedWindow("Polar", 0);
const int winw=200, winh=200;
resizeWindow("input", winw, winh);
resizeWindow("angle", winw, winh);
resizeWindow("radius", winw, winh);
resizeWindow("Polar", 360, (int)sqrt(H*H + W*W));
moveWindow("input", 0, 0);
moveWindow("angle", winw, 0);
moveWindow("radius", 2*winw, 0);
moveWindow("Polar", 3*winw, 0);
imshow("input", Minput);
imshow("angle", Mang);
imshow("radius", Mr);
imshow("Polar", Mpolar);
waitKey(-1);
return 0;
}

Using Thrust library of CUDA for large values

Hi, I wanted to implement an extremely large loop in Thrust, but I find it much slower than normal C++ code. Can you please tell me where I am going wrong?
fi and fj are host vectors.
xsize is usually around a 7-8 digit number.
thrust::host_vector <double> df((2*floor(r)*(floor(r)+1)+1)*n*n);
thrust::device_vector<double> gpu_df((2*floor(r)*(floor(r)+1)+1)*n*n);
for(i=0;i<xsize;i++)
{
gpu_df[i]=(fi[i]-fj[i]);
if(gpu_df[i]<0)
gpu_df[i]=0;
else
gpu_df[i]=gpu_df[i]*(fi[i]-fj[i]);
if(gpu_df[i]>255)
gpu_df[i]=255;
// cout<<fi[i]<<"\n";
}
df=gpu_df;
I feel the code is not being parallelized. Could you please help me out?
To run programs on the GPU with Thrust you need to write them in terms of Thrust algorithms like reduce, transform, sort, etc. In this case we can write the computation in terms of transform, since the loop was just computing a function F(fi[i], fj[i]) and storing the result in df[i]. Note that we must first move the input arrays to the device before calling transform because Thrust requires the input and output arrays to live in the same place.
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <cstdio>
struct my_functor
: public thrust::binary_function<float,float,float>
{
__host__ __device__
float operator()(float fi, float fj)
{
float d = fi - fj;
if (d < 0)
d = 0;
else
d = d * d;
if (d > 255)
d = 255;
return d;
}
};
int main(void)
{
size_t N = 5;
// allocate storage on host
thrust::host_vector<float> cpu_fi(N);
thrust::host_vector<float> cpu_fj(N);
thrust::host_vector<float> cpu_df(N);
// initialze fi and fj arrays
cpu_fi[0] = 2.0; cpu_fj[0] = 0.0;
cpu_fi[1] = 0.0; cpu_fj[1] = 2.0;
cpu_fi[2] = 3.0; cpu_fj[2] = 1.0;
cpu_fi[3] = 4.0; cpu_fj[3] = 5.0;
cpu_fi[4] = 8.0; cpu_fj[4] = -8.0;
// copy fi and fj to device
thrust::device_vector<float> gpu_fi = cpu_fi;
thrust::device_vector<float> gpu_fj = cpu_fj;
// allocate storage for df
thrust::device_vector<float> gpu_df(N);
// perform transformation
thrust::transform(gpu_fi.begin(), gpu_fi.end(), // first input range
gpu_fj.begin(), // second input range
gpu_df.begin(), // output range
my_functor()); // functor to apply
// copy results back to host
thrust::copy(gpu_df.begin(), gpu_df.end(), cpu_df.begin());
// print results on host
for (size_t i = 0; i < N; i++)
printf("f(%2.0lf,%2.0lf) = %3.0lf\n", cpu_fi[i], cpu_fj[i], cpu_df[i]);
return 0;
}
For reference, here's the output of the program:
f( 2, 0) = 4
f( 0, 2) = 0
f( 3, 1) = 4
f( 4, 5) = 0
f( 8,-8) = 255

iPhone FFT with Accelerate framework vDSP

I'm having difficulty implementing an FFT using vDSP. I understand the theory but am looking for a specific code example please.
I have data from a wav file as below:
Question 1. How do I put the audio data into the FFT?
Question 2. How do I get the output data out of the FFT?
Question 3. The ultimate goal is to check for low frequency sounds. How would I do this?
-(OSStatus)open:(CFURLRef)inputURL{
OSStatus result = -1;
result = AudioFileOpenURL (inputURL, kAudioFileReadPermission, 0, &mAudioFile);
if (result == noErr) {
//get format info
UInt32 size = sizeof(mASBD);
result = AudioFileGetProperty(mAudioFile, kAudioFilePropertyDataFormat, &size, &mASBD);
UInt32 dataSize = sizeof packetCount;
result = AudioFileGetProperty(mAudioFile, kAudioFilePropertyAudioDataPacketCount, &dataSize, &packetCount);
NSLog([NSString stringWithFormat:@"File Opened, packet Count: %d", packetCount]);
UInt32 packetsRead = packetCount;
UInt32 numBytesRead = -1;
if (packetCount > 0) {
//allocate buffer
audioData = (SInt16*)malloc( 2 *packetCount);
//read the packets
result = AudioFileReadPackets (mAudioFile, false, &numBytesRead, NULL, 0, &packetsRead, audioData);
NSLog([NSString stringWithFormat:@"Read %d bytes, %d packets", numBytesRead, packetsRead]);
}
}
return result;
}
FFT code below:
log2n = N;
n = 1 << log2n;
stride = 1;
nOver2 = n / 2;
printf("1D real FFT of length log2 ( %d ) = %d\n\n", n, log2n);
/* Allocate memory for the input operands and check its availability,
* use the vector version to get 16-byte alignment. */
A.realp = (float *) malloc(nOver2 * sizeof(float));
A.imagp = (float *) malloc(nOver2 * sizeof(float));
originalReal = (float *) malloc(n * sizeof(float));
obtainedReal = (float *) malloc(n * sizeof(float));
if (originalReal == NULL || A.realp == NULL || A.imagp == NULL) {
printf("\nmalloc failed to allocate memory for the real FFT"
"section of the sample.\n");
exit(0);
}
/* Generate an input signal in the real domain. */
for (i = 0; i < n; i++)
originalReal[i] = (float) (i + 1);
/* Look at the real signal as an interleaved complex vector by
* casting it. Then call the transformation function vDSP_ctoz to
* get a split complex vector, which for a real signal, divides into
* an even-odd configuration. */
vDSP_ctoz((COMPLEX *) originalReal, 2, &A, 1, nOver2);
/* Set up the required memory for the FFT routines and check its
* availability. */
setupReal = vDSP_create_fftsetup(log2n, FFT_RADIX2);
if (setupReal == NULL) {
printf("\nFFT_Setup failed to allocate enough memory for"
"the real FFT.\n");
exit(0);
}
/* Carry out a Forward and Inverse FFT transform. */
vDSP_fft_zrip(setupReal, &A, stride, log2n, FFT_FORWARD);
vDSP_fft_zrip(setupReal, &A, stride, log2n, FFT_INVERSE);
/* Verify correctness of the results, but first scale it by 2n. */
scale = (float) 1.0 / (2 * n);
vDSP_vsmul(A.realp, 1, &scale, A.realp, 1, nOver2);
vDSP_vsmul(A.imagp, 1, &scale, A.imagp, 1, nOver2);
/* The output signal is now in a split real form. Use the function
* vDSP_ztoc to get a split real vector. */
vDSP_ztoc(&A, 1, (COMPLEX *) obtainedReal, 2, nOver2);
/* Check for accuracy by looking at the inverse transform results. */
Compare(originalReal, obtainedReal, n);
Thanks
You put your audio sample data into the real part of the input, and zero the imaginary part.
If you are just interested in the magnitude of each bin in the frequency domain then you calculate sqrt(re*re + im*im) for each output bin. If you're only interested in relative magnitude then you can drop the sqrt and just calculate the squared magnitude, (re*re + im*im).
You would look at the magnitudes of the bin or bins (see (2)) that correspond to your frequency or frequencies of interest. If your sample rate is Fs, and your FFT size is N, then the corresponding frequency for output bin i is given by f = i * Fs / N. Conversely if you are interested in a specific frequency f then the bin of interest, i, is given by i = N * f / Fs.
Additional note: you will need to apply a suitable window function (e.g. Hann aka Hanning) to your FFT input data, prior to calculating the FFT itself.
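To make points 2 and 3 concrete, here is a small sketch based on the split-complex vector A and the FFT length n from the code above (the sample rate and the target frequency are assumed values):
// Sketch: squared magnitudes of the half-spectrum held in A (split complex),
// and the output bin corresponding to an assumed low frequency of interest.
// Note: with vDSP_fft_zrip packing, A.imagp[0] actually holds the Nyquist term,
// so bin 0 should be treated separately if you need the DC value.
float Fs = 44100.0f;                  // assumed sample rate of the wav data
float f  = 100.0f;                    // assumed low frequency to check
int bin  = (int)(n * f / Fs + 0.5f);  // i = N * f / Fs
float *magsq = (float *) malloc(nOver2 * sizeof(float));
for (int k = 0; k < nOver2; k++)
    magsq[k] = A.realp[k] * A.realp[k] + A.imagp[k] * A.imagp[k];
// magsq[bin] is then the (relative) energy near f; take sqrtf() for the absolute magnitude
free(magsq);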
You can check Apple's documentation and take good care of data packing.
Here is my example:
// main.cpp
// FFTTest
//
// Created by Harry-Chris Stamatopoulos on 11/23/12.
//
/*
This is an example of a hilbert transformer using
Apple's VDSP fft/ifft & other VDSP calls.
Output signal has a PI/2 phase shift.
COMPLEX_SPLIT vector "B" was used to cross-check
real and imaginary parts coherence with the original vector "A"
that is obtained straight from the fft.
Tested and working.
Cheers!
*/
#include <iostream>
#include <Accelerate/Accelerate.h>
#define PI 3.14159265
#define DEBUG_PRINT 1
int main(int argc, const char * argv[])
{
float fs = 44100; //sample rate
float f0 = 440; //sine frequency
uint32_t i = 0;
uint32_t L = 1024;
/* vector allocations*/
float *input = new float [L];
float *output = new float[L];
float *mag = new float[L/2];
float *phase = new float[L/2];
for (i = 0 ; i < L; i++)
{
input[i] = cos(2*PI*f0*i/fs);
}
uint32_t log2n = log2f((float)L);
uint32_t n = 1 << log2n;
//printf("FFT LENGTH = %lu\n", n);
FFTSetup fftSetup;
COMPLEX_SPLIT A;
COMPLEX_SPLIT B;
A.realp = (float*) malloc(sizeof(float) * L/2);
A.imagp = (float*) malloc(sizeof(float) * L/2);
B.realp = (float*) malloc(sizeof(float) * L/2);
B.imagp = (float*) malloc(sizeof(float) * L/2);
fftSetup = vDSP_create_fftsetup(log2n, FFT_RADIX2);
/* Carry out a Forward and Inverse FFT transform. */
vDSP_ctoz((COMPLEX *) input, 2, &A, 1, L/2);
vDSP_fft_zrip(fftSetup, &A, 1, log2n, FFT_FORWARD);
mag[0] = sqrtf(A.realp[0]*A.realp[0]);
//get phase
vDSP_zvphas (&A, 1, phase, 1, L/2);
phase[0] = 0;
//get magnitude;
for(i = 1; i < L/2; i++){
mag[i] = sqrtf(A.realp[i]*A.realp[i] + A.imagp[i] * A.imagp[i]);
}
//after done with possible phase and mag processing re-pack the vectors in VDSP format
B.realp[0] = mag[0];
B.imagp[0] = mag[L/2 - 1];
//unwrap, process & re-wrap phase
for(i = 1; i < L/2; i++){
phase[i] -= 2*PI*i * fs/L;
phase[i] -= PI / 2 ;
phase[i] += 2*PI*i * fs/L;
}
//construct real & imaginary part of the output packed vector (input to ifft)
for(i = 1; i < L/2; i++){
B.realp[i] = mag[i] * cosf(phase[i]);
B.imagp[i] = mag[i] * sinf(phase[i]);
}
#if DEBUG_PRINT
for (i = 0 ; i < L/2; i++)
{
printf("A REAL = %f \t A IMAG = %f \n", A.realp[i], A.imagp[i]);
printf("B REAL = %f \t B IMAG = %f \n", B.realp[i], B.imagp[i]);
}
#endif
//ifft
vDSP_fft_zrip(fftSetup, &B, 1, log2n, FFT_INVERSE);
//scale factor
float scale = (float) 1.0 / (2*L);
//scale values
vDSP_vsmul(B.realp, 1, &scale, B.realp, 1, L/2);
vDSP_vsmul(B.imagp, 1, &scale, B.imagp, 1, L/2);
//unpack B to real interleaved output
vDSP_ztoc(&B, 1, (COMPLEX *) output, 2, L/2);
// print output signal values to console
printf("Shifted signal x = \n");
for (i = 0 ; i < L/2; i++)
printf("%f\n", output[i]);
//release resources
delete [] input;
delete [] output;
delete [] mag;
delete [] phase;
free(A.realp);
free(A.imagp);
free(B.realp);
free(B.imagp);
vDSP_destroy_fftsetup(fftSetup);
return 0;
}
One thing you need to be careful about is the DC component of the calculated FFT. I compared my results with the fftw library FFT, and the imaginary part of the transform calculated with the vDSP library always had a different value at index 0 (which means 0 frequency, so DC).
Another measure I applied was to divide both real and imaginary parts by a factor of 2. I guess this is due to the algorithm used in the function. Also, both these problems occurred in the FFT process but not in the IFFT process.
I used vDSP_fft_zrip.
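To illustrate the two remarks above, here is a small sketch (the factor-of-2 convention and the packing of the DC/Nyquist terms should be double-checked against the vDSP documentation; variable names follow the code above):
// Undo the factor of 2 applied by vDSP's real forward FFT, then read the DC and
// Nyquist terms, which the zrip packing stores in the first complex slot.
float half = 0.5f;
vDSP_vsmul(A.realp, 1, &half, A.realp, 1, L/2);
vDSP_vsmul(A.imagp, 1, &half, A.imagp, 1, L/2);
float dc      = A.realp[0];   // 0 Hz term (purely real)
float nyquist = A.imagp[0];   // Fs/2 term (purely real), packed into imagp[0]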