Output of Forking in C

Given PID = 4224, why am I getting the output of the following snippet of code as:
x = 10922 y = 0
I am process: 10923
x = 0 y = 2
I am process: 10922
x = 0 y = 2
I am process: 10922
x = 0 y = 0
I am process: 10924
For this code:
#include <iostream>
#include <stdio.h>
#include <unistd.h>

int main() {
    int x = 1, y = 2;
    int pid;
    x = fork();
    if (x == 0) {
        printf("x = %d y = %d\n", x, y);
        pid = getpid();
        printf("I am process: %d\n", pid);
    }
    y = fork();
    if (y == 0) {
        printf("x = %d y = %d\n", x, y);
        pid = getpid();
        printf("I am process: %d\n", pid);
    }
    return 0;
}

Let's start with the output of your program:
x = 0 y = 2
I am process: 330161
x = 330161 y = 0
I am process: 330162
x = 0 y = 0
I am process: 330163
Notice that the order is a bit different from yours. That is because which process runs first is environment-dependent. So, let's fix your program up a bit to make it clearer:
#include <stdio.h>
#include <unistd.h>

int main() {
    int x = 1, y = 2;
    int pid;
    x = fork();
    if (x == 0) {
        pid = getpid();
        printf("1:x = %d y = %d I am process %d\n", x, y, pid);
    }
    fflush(NULL);
    y = fork();
    if (y == 0) {
        pid = getpid();
        printf("2:x = %d y = %d I am process %d\n", x, y, pid);
    }
    return 0;
}
By putting each process's output into a single printf() with one newline, we can rest assured that each line belongs to one process; the lines are not interleaved.
By adding an indicator at the beginning of the line (1:, 2:) we know which printf() each line came from. The fflush(NULL) prevents the child process from inheriting any partially buffered stdio output -- not a problem if you are on a terminal, but if you redirect to a file you might find duplicated lines. So the output on my system looks like:
1:x = 0 y = 2 I am process 330214
2:x = 330214 y = 0 I am process 330215
2:x = 0 y = 0 I am process 330216
From that, the first child of the main process is 330214, the second is 330215, and the grandchild (the first child's own child) is 330216. Every process that survives the first fork() also executes the second fork(), so four processes exist in total; the original parent prints nothing, because in it both x and y hold nonzero child PIDs:

parent ── fork #1 ──> 330214 (x==0: prints "1:", then runs fork #2 itself)
   └───── fork #2 ──> 330215 (x==330214 inherited, y==0: prints "2:")
330214 ── fork #2 ──> 330216 (x==0, y==0: prints "2:")
I hope that gives you enough to work out your answers.
Don't be afraid to play with these example programs. It is a good way to learn about the workings of your system.
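For instance, to keep your shell prompt from returning before all the output appears, and to reap the children properly, each process can wait for the children it forked. Here is a minimal sketch (my addition, not from the original answer) using wait() from <sys/wait.h>; note the order of the printed lines is still up to the scheduler:

#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int main() {
    int x, y, pid;
    x = fork();
    if (x == 0) {
        pid = getpid();
        printf("1: I am the first child, process %d\n", pid);
    }
    fflush(NULL);
    y = fork();
    if (y == 0) {
        pid = getpid();
        printf("2: I am a second-level process, %d\n", pid);
    }
    /* Every process (parent, children, grandchild) reaps whatever
       children it forked before exiting; wait() returns -1 when
       there are none left. */
    while (wait(NULL) > 0)
        ;
    return 0;
}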


Determine if matrix A is subset of matrix B

For a matrix such as
A = [...
12 34 67;
90 78 15;
10 71 24];
how could we efficiently determine whether it is a subset of a larger matrix?
B = [...
12 34 67; % found
89 67 45;
90 78 15; % found
10 71 24; % found, so A is subset of B.
54 34 11];
Here are the conditions:
all numbers are integers
the matrices are large, i.e., the row count can exceed 100000; the column count may vary from 1 to 10 (same for A and B).
Edit:
It seems that, for the case in this question, ismember works just fine when called only a few times. My initial impression was due to previous experiences where ismember was invoked many times inside a nested loop, resulting in the worst performance.
clear all; clc
n = 200000;
k = 10;
B = randi(n,n,k);
f = randperm(n);
A = B(f(1:1000),:);
tic
assert(sum(ismember(A,B,'rows')) == size(A,1));
toc
tic
assert(all(any(all(bsxfun(@eq,B,permute(A,[3,2,1])),2),1))); %user2999345
toc
which results in:
Elapsed time is 1.088552 seconds.
Elapsed time is 12.154969 seconds.
Here are more benchmarks:
clear all; clc
n = 20000;
f = randperm(n);
k = 10;
t1 = 0;
t2 = 0;
t3 = 0;
for i = 1:7
    B = randi(n,n,k);
    A = B(f(1:n/10),:);
    %A(100,2) = 0; % to make A not a submat of B
    tic
    b = sum(ismember(A,B,'rows')) == size(A,1);
    t1 = t1 + toc;
    assert(b);
    tic
    b = ismember_mex(A, sortrows(B));
    t2 = t2 + toc;
    assert(b);
    tic
    b = issubmat(A,B);
    t3 = t3 + toc;
    assert(b);
end
Benchmark results, in seconds (ismember_mex is George's, issubmat is skm's):

                ismember   ismember_mex   issubmat
n=20000,k=10      0.6326         0.1064    11.6899
n=1000,k=100      0.2652         0.0155     0.0577
n=1000,k=1000     1.1705         0.1582     0.2202
n=1000,k=10000   13.2470         2.0033     2.6367

* issubmat eats RAM when n or k is over 10000!
* issubmat(A,B): A is being checked as a submatrix of B.
It seems that ismember is hard to beat, at least in pure MATLAB code. So I created a C implementation which can be compiled with the MEX compiler.
#include "mex.h"
#if MX_API_VER < 0x07030000
typedef int mwIndex;
typedef int mwSize;
#endif /* MX_API_VER */
#include <math.h>
#include <stdlib.h>
#include <string.h>

int ismember(const double *y, const double *x, int yrow, int xrow, int ncol);

void mexFunction(int nlhs, mxArray *plhs[],
                 int nrhs, const mxArray *prhs[])
{
    mwSize xcol, ycol, xrow, yrow;
    /* output data */
    int* result;
    /* arguments */
    const mxArray* y;
    const mxArray* x;
    if (nrhs != 2)
    {
        mexErrMsgTxt("2 inputs required.");
    }
    y = prhs[0];
    x = prhs[1];
    ycol = mxGetN(y);
    yrow = mxGetM(y);
    xcol = mxGetN(x);
    xrow = mxGetM(x);
    /* Both inputs must be full matrices of type double. */
    if (!mxIsDouble(y) || !mxIsDouble(x))
    {
        mexErrMsgTxt("Input must be of type 'double'.");
    }
    if (xcol != ycol)
    {
        mexErrMsgTxt("Inputs must have the same number of columns");
    }
    plhs[0] = mxCreateLogicalMatrix(1, 1);
    /* Note: taking mxGetPr() of a logical mxArray causes compiler
       warnings; this is cleaned up in EDIT 1 below. */
    result = mxGetPr(plhs[0]);
    *result = ismember(mxGetPr(y), mxGetPr(x), yrow, xrow, ycol);
}

int ismemberinner(const double *y, int idx, const double *x, int yrow, int xrow, int ncol) {
    int from, to, i;
    from = 0;
    to = xrow-1;
    for(i = 0; i < ncol; ++i) {
        // Perform binary search
        double yi = *(y + i * yrow + idx);
        const double *curx = x + i * xrow;
        int l = from;
        int u = to;
        while(l <= u) {
            int mididx = l + (u-l)/2;
            if(yi < curx[mididx]) {
                u = mididx-1;
            }
            else if(yi > curx[mididx]) {
                l = mididx+1;
            }
            else {
                // This can be further optimized by performing additional binary searches
                for(from = mididx; from > l && curx[from-1] == yi; --from);
                for(to = mididx; to < u && curx[to+1] == yi; ++to);
                break;
            }
        }
        if(l > u) {
            return 0;
        }
    }
    return 1;
}

int ismember(const double *y, const double *x, int yrow, int xrow, int ncol) {
    int i;
    for(i = 0; i < yrow; ++i) {
        if(!ismemberinner(y, i, x, yrow, xrow, ncol)) {
            return 0;
        }
    }
    return 1;
}
Compile it using:
mex -O ismember_mex.c
It can be called as follows:
ismember_mex(A, sortrows(B))
First of all, it assumes that the matrices have the same number of columns. It works by first sorting the rows of the larger matrix (x hereafter, the second argument to the function). Then, a type of binary search is employed to identify whether the rows of the smaller matrix (y hereafter) are contained in x. This is done for each row of y separately (see the ismember C function).
For a given row of y, it starts from the first entry and finds the range of indices (tracked by the from and to variables) that match the first column of x, using binary search. This is repeated for the remaining entries, unless some value is not found, in which case it terminates and returns 0.
I tried implementing this idea in MATLAB, but it didn't work that well. Regarding performance, I found that: (a) when there are mismatches, it is usually much faster than ismember; (b) when the range of values in x and y is large, it is again faster than ismember; and (c) when everything matches and the number of possible values in x and y is small (e.g. less than 1000), ismember may be faster in some situations.
Finally, I want to point out that some parts of the C implementation may be further optimized.
EDIT 1
I fixed the warnings and further improved the function.
#include "mex.h"
#include <math.h>
#include <stdlib.h>
#include <string.h>

int ismember(const double *y, const double *x, unsigned int nrowy, unsigned int nrowx, unsigned int ncol);

void mexFunction(int nlhs, mxArray *plhs[],
                 int nrhs, const mxArray *prhs[])
{
    unsigned int xcol, ycol, nrowx, nrowy;
    /* arguments */
    const mxArray* y;
    const mxArray* x;
    if (nrhs != 2)
    {
        mexErrMsgTxt("2 inputs required.");
    }
    y = prhs[0];
    x = prhs[1];
    ycol = (unsigned int) mxGetN(y);
    nrowy = (unsigned int) mxGetM(y);
    xcol = (unsigned int) mxGetN(x);
    nrowx = (unsigned int) mxGetM(x);
    /* Both inputs must be full matrices of type double. */
    if (!mxIsDouble(y) || !mxIsDouble(x))
    {
        mexErrMsgTxt("Input must be of type 'double'.");
    }
    if (xcol != ycol)
    {
        mexErrMsgTxt("Inputs must have the same number of columns");
    }
    plhs[0] = mxCreateLogicalScalar(ismember(mxGetPr(y), mxGetPr(x), nrowy, nrowx, ycol));
}

int ismemberinner(const double *y, const double *x, unsigned int nrowy, unsigned int nrowx, unsigned int ncol) {
    unsigned int from = 0, to = nrowx-1, i;
    for(i = 0; i < ncol; ++i) {
        // Perform binary search
        const double yi = *(y + i * nrowy);
        const double *curx = x + i * nrowx;
        /* l and u are signed so that u can drop below zero when yi is
           smaller than everything in the range (an unsigned u would
           wrap around and read out of bounds). */
        int l = (int) from;
        int u = (int) to;
        while(l <= u) {
            const unsigned int mididx = (unsigned int)(l + (u-l)/2);
            const double midx = curx[mididx];
            if(yi < midx) {
                u = (int) mididx - 1;
            }
            else if(yi > midx) {
                l = (int) mididx + 1;
            }
            else {
                {
                    // Binary search to identify smallest index of x that equals yi
                    // Equivalent to for(from = mididx; from > l && curx[from-1] == yi; --from)
                    unsigned int limit = mididx;
                    while(curx[from] != yi) {
                        const unsigned int mididx = from + (limit-from)/2;
                        if(curx[mididx] < yi) {
                            from = mididx+1;
                        }
                        else {
                            limit = mididx-1;
                        }
                    }
                }
                {
                    // Binary search to identify largest index of x that equals yi
                    // Equivalent to for(to = mididx; to < u && curx[to+1] == yi; ++to);
                    unsigned int limit = mididx;
                    while(curx[to] != yi) {
                        const unsigned int mididx = limit + (to-limit)/2;
                        if(curx[mididx] > yi) {
                            to = mididx-1;
                        }
                        else {
                            limit = mididx+1;
                        }
                    }
                }
                break;
            }
        }
        if(l > u) {
            return 0;
        }
    }
    return 1;
}

int ismember(const double *y, const double *x, unsigned int nrowy, unsigned int nrowx, unsigned int ncol) {
    unsigned int i;
    for(i = 0; i < nrowy; ++i) {
        if(!ismemberinner(y + i, x, nrowy, nrowx, ncol)) {
            return 0;
        }
    }
    return 1;
}
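As before, this version compiles with mex -O ismember_mex.c and is called as ismember_mex(A, sortrows(B)).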
Using this version I wasn't able to identify any case where ismember is faster. Also, I noticed that one reason ismember is hard to beat is that it uses all the cores of the machine! Of course, the function I provided could be optimized to do this too, but that requires much more effort.
Finally, before using my implementation I would advise you to test it extensively. I did some testing and it seems to work, but I suggest you also do some additional testing.
For small matrices ismember should probably be enough.
Usage: ismember(B,A,'rows')
ans =
     1
     0
     1
     1
     0
This flags which rows of B appear among the rows of A; to test directly that every row of A appears in B, use all(ismember(A,B,'rows')).
I put this answer here to emphasize the need for solutions with higher performance, and I will accept it only if no better solution appears.
Using ismember might wrongly indicate that A is a subset of B when a row of A appears twice in B while another one is missing. The following solution is suitable if the rows of A and B don't need to be in the same order. However, I haven't tested its performance for large matrices.
A = [...
34 12 67;
90 78 15;
10 71 24];
B = [...
34 12 67; % found
89 67 45;
90 78 15; % found
10 71 24; % found, so A is subset of B.
54 34 11];
A = permute(A,[3 2 1]);
rowIdx = all(bsxfun(@eq,B,A),2);
colIdx = any(rowIdx,1);
isAMemberB = all(colIdx);
You have said the number of columns is <= 10. If, in addition, the matrix elements are all integers representable as bytes, you could encode each row into two 64-bit integers. That would reduce the number of comparisons by a factor of 64.
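For illustration, here is a minimal sketch of that packing idea (my addition, not from the original answer; the pack_row helper and a row-major byte layout are assumptions):

#include <stdint.h>

/* Pack up to 10 byte-sized column values of one row into two 64-bit keys,
   so that two whole rows compare with just two integer comparisons. */
static void pack_row(const uint8_t *row, int ncol, uint64_t key[2])
{
    key[0] = 0;
    key[1] = 0;
    for (int c = 0; c < ncol; ++c)
        key[c / 8] |= (uint64_t)row[c] << (8 * (c % 8));
}

static int rows_equal(const uint64_t a[2], const uint64_t b[2])
{
    return a[0] == b[0] && a[1] == b[1];
}

Packed keys can also be sorted and binary-searched directly, which would combine well with the MEX approach above.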
For the general case, the following may not be all that much better for thin matrices, but it scales very well as the matrices get fat, due to the level-3 multiplication:
function yes = is_submat(A,B)
    ma = size(A, 1);
    mb = size(B, 1);
    n  = size(B, 2);
    yes = false;
    if ma >= mb
        a = A(:,1);
        b = B(:,1);
        D = (0 == bsxfun(@minus, a, b'));
        q = any(D, 2);
        yes = all(any(D,1));
        if yes && (n > 1)
            A = A(q, :);
            C = B*A';
            za = sum(A.*A, 2);
            zb = sum(B.*B, 2);
            Z = sqrt(zb)*sqrt(za');
            [~, ix] = max(C./Z, [], 2);
            A = A(ix,:);
            yes = all(A(:) == B(:));
        end
    end
end
In the above, I use the fact that the dot product of two unit vectors is maximized (with value 1) exactly when the vectors are equal; the cosine matrix C./Z picks out the best-matching row of A for each row of B, and the final elementwise comparison verifies the match exactly.
For fat matrices (say 5000+ columns) with large numbers of unique elements, the performance beats ismember quite handily; otherwise, it is slower than ismember. For thin matrices ismember is faster by an order of magnitude.
Best case test for this function:
A = randi(50000, [10000, 10000]);
B = A(2:3:end, :);
B = B(randperm(size(B,1)),:);
fprintf('%s: %u\n', 'Number of columns', size(A,2));
fprintf('%s: %u\n', 'Element spread', 50000);
tic; is_submat(A,B); toc;
tic; all(ismember(B,A,'rows')); toc;
fprintf('________\n\n');
is_submat_test;
Number of columns: 10000
Element spread: 50000
Elapsed time is 10.713310 seconds (is_submat).
Elapsed time is 17.446682 seconds (ismember).
So I have to admit that, all round, ismember seems to be much better.
Edits: corrected a bug that appeared when there is only one column (fixing this also resulted in more efficient code); the previous version also did not distinguish between positive and negative numbers. Added timing tests.

Cuda matrix multiplication results differs from MATLAB

It's been two days and I still can't figure out why my implementation of CUDA matrix multiplication differs from the results produced in MATLAB.
CUDA kernel: A(200x60000) = W(200x784) * Data(784x60000)
__global__ void CalculateA(Matrix W, Matrix Data, Matrix A)
{
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    if ((Row < W.row) && (Col < Data.col)) {
        float Cvalue = 0.0;
        for (int i = 0; i < W.col; ++i) {
            Cvalue += W.elements[Row*W.col+i] * Data.elements[i*Data.col+Col];
        }
        A.elements[Row*A.col+Col] = Cvalue;
    }
}
And calling the kernel:
void myFunc(Matrix W1, Matrix data){
    Matrix d_W1, d_data, d_a2, a2;
    size_t size;
    a2.row = W1.row; d_a2.row = a2.row;
    a2.col = data.col; d_a2.col = a2.col;
    size = a2.col*a2.row*sizeof(float);
    cudaMalloc(&d_a2.elements, size);
    d_W1.row = W1.row; d_W1.col = W1.col;
    size = W1.col*W1.row*sizeof(float);
    cudaMalloc(&d_W1.elements, size);
    cudaMemcpy(d_W1.elements, W1.elements, size, cudaMemcpyHostToDevice);
    d_data.col = data.col; d_data.row = data.row;
    size = data.row*data.col*sizeof(float);
    cudaMalloc(&d_data.elements, size);
    cudaMemcpy(d_data.elements, data.elements, size, cudaMemcpyHostToDevice);
    dim3 dimGrid(data.col/32 + 1, W1.row/32 + 1, 1);
    dim3 dimBlock(32, 32, 1);
    CalculateA<<<dimGrid, dimBlock>>>(d_W1, d_data, d_a2);
    a2.elements = new float [a2.row*a2.col];
    cudaMemcpy(a2.elements, d_a2.elements, sizeof(float)*a2.row*a2.col, cudaMemcpyDeviceToHost);
    printf("\nA2 first and last member %f - %f\n", a2.elements[0], a2.elements[a2.row*a2.col-1]);
}
The difference in results is not small. For example, the first and last elements from the CUDA code are 0.011322 and -0.179534, but multiplying in MATLAB gives 0.4280 and 0.0056.
This is how I do it in MATLAB:
>> size(W1)
ans = 200 784
>> size(data)
ans = 784 60000
>> z2 = W1*data;
>> size(z2)
ans = 200 60000
>> z2 = z2(:);
>> z2(1)
ans = 0.4280
>> z2(200*60000)
ans = 0.0056
There is nothing wrong with the code you posted. If I expand your kernel and function into a complete running example like this:
#include <iostream>

struct Matrix
{
    int row;
    int col;
    float *elements;
    __device__ __host__
    float& operator()(int r, int c) { return elements[r*col + c]; };
};

__global__ void CalculateA(Matrix W, Matrix Data, Matrix A)
{
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    if ((Row < W.row) && (Col < Data.col)) {
        float Cvalue = 0.0;
        for (int i = 0; i < W.col; ++i) {
            Cvalue += W.elements[Row*W.col+i] * Data.elements[i*Data.col+Col];
        }
        A.elements[Row*A.col+Col] = Cvalue;
    }
}

void myFunc(Matrix W1, Matrix data)
{
    Matrix d_W1, d_data, d_a2, a2;
    size_t size;
    a2.row = W1.row; d_a2.row = a2.row;
    a2.col = data.col; d_a2.col = a2.col;
    size = a2.col*a2.row*sizeof(float);
    cudaMalloc(&d_a2.elements, size);
    d_W1.row = W1.row; d_W1.col = W1.col;
    size = W1.col*W1.row*sizeof(float);
    cudaMalloc(&d_W1.elements, size);
    cudaMemcpy(d_W1.elements, W1.elements, size, cudaMemcpyHostToDevice);
    d_data.col = data.col; d_data.row = data.row;
    size = data.row*data.col*sizeof(float);
    cudaMalloc(&d_data.elements, size);
    cudaMemcpy(d_data.elements, data.elements, size, cudaMemcpyHostToDevice);
    dim3 dimGrid(data.col/32 + 1, W1.row/32 + 1, 1);
    dim3 dimBlock(32, 32, 1);
    CalculateA<<<dimGrid, dimBlock>>>(d_W1, d_data, d_a2);
    a2.elements = new float [a2.row*a2.col];
    cudaMemcpy(a2.elements, d_a2.elements, sizeof(float)*a2.row*a2.col, cudaMemcpyDeviceToHost);
    for(int j=0; j<a2.col; ++j) {
        for(int i=0; i<a2.row; ++i) {
            std::cout << a2(i,j) << " ";
        }
        std::cout << std::endl;
    }
}

int main(void)
{
    float a[6] = { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f };
    float b[6] = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f };
    Matrix W1; W1.row=2; W1.col=3; W1.elements = &a[0];
    Matrix Data; Data.row=3; Data.col=2; Data.elements = &b[0];
    myFunc(W1, Data);
    return 0;
}
and run it, I get this:
>nvcc -arch=sm_21 -Xptxas="-v" -m32 matrix.cu
matrix.cu
tmpxft_000014f4_00000000-5_matrix.cudafe1.gpu
tmpxft_000014f4_00000000-10_matrix.cudafe2.gpu
matrix.cu
ptxas : info : 132 bytes gmem, 28 bytes cmem[14]
ptxas : info : Compiling entry function '_Z10CalculateA6MatrixS_S_' for 'sm_21'
ptxas : info : Function properties for _Z10CalculateA6MatrixS_S_
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas : info : Used 14 registers, 68 bytes cmem[0]
tmpxft_000014f4_00000000-5_matrix.cudafe1.cpp
tmpxft_000014f4_00000000-15_matrix.ii
>cuda-memcheck a.exe
========= CUDA-MEMCHECK
2.2 4.9
2.8 6.4
========= ERROR SUMMARY: 0 errors
which is the correct answer for the dot product assuming column major ordering (which is the Matlab convention).
So if your results don't agree, it is because of something you haven't shown us. One likely possibility is that your test problem is so large (and the kernel so inefficient) that, if you are running this on a display GPU, your program is hitting the display driver watchdog timer limit and being killed before the kernel finishes running. Also note that you have no CUDA API error checking whatsoever, so it is possible that you are getting runtime errors that stop your kernel from finishing or even running at all, but you simply don't notice because of the lack of error checking.
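For reference, a minimal error-checking pattern looks like this (my addition, not part of the original answer; the CUDA_CHECK macro name is arbitrary). Wrapping every API call, checking the launch, and synchronizing would surface a watchdog kill or launch failure immediately:

#include <cstdio>
#include <cstdlib>

// Abort with a message if a CUDA runtime call fails.
#define CUDA_CHECK(call)                                            \
    do {                                                            \
        cudaError_t err = (call);                                   \
        if (err != cudaSuccess) {                                   \
            fprintf(stderr, "CUDA error %s at %s:%d\n",             \
                    cudaGetErrorString(err), __FILE__, __LINE__);   \
            exit(EXIT_FAILURE);                                     \
        }                                                           \
    } while (0)

// Usage:
// CUDA_CHECK(cudaMemcpy(d_W1.elements, W1.elements, size, cudaMemcpyHostToDevice));
// CalculateA<<<dimGrid, dimBlock>>>(d_W1, d_data, d_a2);
// CUDA_CHECK(cudaGetLastError());      // catches launch errors
// CUDA_CHECK(cudaDeviceSynchronize()); // catches execution errors (e.g. watchdog timeout)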

3d patches from a 3d matrix

I have a 3d matrix (3x3x3), and I need to extract 3d patches (2x2x2) and transform them into vectors.
In 2d, simply:
I = randi(5,3,3);
patches_2d = im2col(I,[2 2],'sliding');
What about 3d?
I = randi(5,3,3,3);
patches_3d = ???
im2col only works in 2d. In 3d I would have to recombine the vectors 1 and 7, 2 and 8, ...
Is there any fast function for this task?
I do not believe that there is any built-in way to do this. If you need it to be fast, it should be fairly simple to write your own MEX function in C and call it from MATLAB.
Here is my (quick and dirty) solution:
im3col.c:
#include <mex.h>

void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] )
{
    const mxArray *I = prhs[0];
    double *indata = mxGetPr(I);
    double *patchSize = mxGetPr(prhs[1]);
    const mwSize *size = mxGetDimensions(I);  /* mxGetDimensions returns const mwSize* */
    int J = (int)patchSize[0], K = (int)patchSize[1], H = (int)patchSize[2];
    int M = (int)size[0], N = (int)size[1], P = (int)size[2];
    int numPatches = (M - J + 1)*(N - K + 1)*(P - H + 1);
    int out_rows = J*K*H, out_cols = numPatches;
    mxArray *out = mxCreateDoubleMatrix( out_rows, out_cols, mxREAL );
    double *outdata = mxGetPr(out);
    int patch = 0;
    /* Slide the patch origin over every valid offset, column-major order. */
    for( int h_offset = 0; h_offset < P-H+1; h_offset++ ){
        for( int k_offset = 0; k_offset < N-K+1; k_offset++ ){
            for( int j_offset = 0; j_offset < M-J+1; j_offset++ ){
                int row = 0;
                /* Copy one J-by-K-by-H patch into the next output column. */
                for( int h = 0; h < H; h++ ){
                    for( int k = 0; k < K; k++ ){
                        for( int j = 0; j < J; j++ ){
                            outdata[patch*out_rows + row] =
                                indata[ (j_offset+j) + (k_offset+k)*M + (h_offset+h)*M*N ];
                            ++row;
                        }}}
                ++patch;
            }}}
    plhs[0] = out;
}
Compile:
>> mex -O CFLAGS="\$CFLAGS -std=c99 -Wall" im3col.c
Test:
>> A(:,:,1) = [1,4,7;2,5,8;3,6,9]; A(:,:,2) = [10,13,16;11,14,17;12,15,18];
>> B = im3col(A, [2,2,1])
B =
1 2 4 5 10 11 13 14
2 3 5 6 11 12 14 15
4 5 7 8 13 14 16 17
5 6 8 9 14 15 17 18
>> A(:,:,1),A(:,:,2)
ans =
1 4 7
2 5 8
3 6 9
ans =
10 13 16
11 14 17
12 15 18
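For the question's original 3x3x3 array with 2x2x2 patches, the same MEX function should give an 8x8 result (a usage sketch, assuming im3col was compiled as above):
>> I = randi(5,3,3,3);
>> patches_3d = im3col(I, [2,2,2]); % 8 rows (2*2*2 voxels), 8 columns (2*2*2 patch positions)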
Here is the other direction:
(It is pretty slow and there is definitely a faster way)
function [img] = patch2im_2d_time(patch, size_img, size_patch, size_skip, border)
    Nx = size_img(1);
    Ny = size_img(2);
    Nt = size_img(5);
    psz1 = size_patch(1);
    psz2 = size_patch(2);
    psz3 = size_patch(3);
    % Extract blocks. One could save a lot here.
    patches = reshape(patch, [psz1 psz2 psz3 size(patch,2)]);
    c = 1;
    img2 = zeros(squeeze(size_img));
    % Count for each pixel how many times we added smth to it.
    add_count = zeros(size_img);
    % The first three loops run through all the pixels in the image.
    for d = 1:Nt-psz3+1
        for j = 1:Nx-psz2+1
            for i = 1:Ny-psz1+1
                % Here we get the next patch. The next patch is always
                % the patch that has the pixel at i,j,d at its top front corner.
                current_patch = patches(:,:,:,c);
                % counter for the next patch
                c = c + 1;
                % In this loop we add the patch values of each pixel in the
                % patch to the image. i,j,d is the base; we add the offsets
                % ii, jj and dd to it. This takes psz^3 iterations.
                for dd = 1:psz3
                    for ii = 1:psz2
                        for jj = 1:psz1
                            img2(i+ii-1,j+jj-1,d+dd-1) = img2(i+ii-1,j+jj-1,d+dd-1) + current_patch(ii,jj,dd);
                            add_count(i+ii-1,j+jj-1,d+dd-1) = add_count(i+ii-1,j+jj-1,d+dd-1) + 1;
                        end
                    end
                end
            end
        end
    end
    img = flipud(rot90(img2 ./ add_count, 1));
end
Remember that MATLAB uses column-major ordering.
% One possible way in MATLAB: call im2col and reshape twice.
% N = [row, col, num_frames]
[x_height, ~, num_frames] = size(N);
patchSize = 16;
patchTemporal = 10;
N = reshape(N, x_height, []);
N = im2col(N, [patchSize, patchSize], 'distinct');
N = reshape(N, [], num_frames);
N = im2col(N, [patchSize^2, patchTemporal], 'distinct');
% N = [patchSize^2 * patchTemporal x numPatches]
What about this solution? To obtain 3x3x3 ROIs from I, I suggest:
blkSize = 3; % should be an odd value like 3,5,7,etc.
r = floor(blkSize/2);
k = 1;
for sliceNo = (r+1):(size(I,3)-r)
    img = I(:,:,sliceNo-r:sliceNo+r);
    noPix = (size(img,1)-2*r)*(size(img,2)-2*r);
    neiblk = zeros(blkSize^3, noPix);
    for blk = 1:blkSize
        neiblk(blkSize^2*(blk-1)+1:blkSize^2*blk,:) = im2col(img(:,:,blk),...
            [blkSize,blkSize],'sliding');
    end
    ROIs(:,noPix*(k-1)+1:noPix*k) = neiblk;
    k = k+1;
end

What is the returned value?

In a language that passes parameters by reference, given the following function:
int function g(x, y) {
    x = x + 1;
    y = y + 2;
    return x + y;
}
If i = 3 and g(i,i) is called, what is the value returned? I thought it was 9; is this correct?
If it's pass-by-reference (your original question was about C, but C doesn't have pass-by-reference, and the question has changed since then anyway, so I'll answer generically), it's probably the case that x and y will simply modify the variables that are passed in for them. That's what a reference is, after all.
In this case, they're both a reference to the same variable i, so your sequence is likely to be:
i = i + 1; // i becomes 4.
i = i + 2; // i becomes 6.
return i + i; // return i + i, or 12.
You can see this in operation with the following C (using pointers to emulate pass-by-reference):
pax$ cat qq.c
#include <stdio.h>

int g(int *x, int *y) {
    *x = *x + 1;
    *y = *y + 2;
    return *x + *y;
}

int main (void) {
    int i = 3;
    int rv = g (&i, &i);
    printf ("Returned: %d\n", rv);
    return 0;
}

pax$ gcc -o qq qq.c ; ./qq
Returned: 12
Your result of 9 seems to be assuming that the references are distinct from one another, such as in the following code:
#include <stdio.h>

int g(int *x, int *y) {
    *x = *x + 1;
    *y = *y + 2;
    return *x + *y;
}

int main (void) {
    int i1 = 3, i2 = 3;
    int rv = g (&i1, &i2);
    printf ("Returned: %d\n", rv);
    return 0;
}
(this one does output 9), but that's not usually the case with reference types.
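For contrast, here is a pass-by-value version (my addition, not in the original answer). It also returns 9, since x and y are private copies and i is untouched, which is the other interpretation under which the asker's guess is correct:

#include <stdio.h>

/* Pass-by-value: x and y are copies, so i is never modified. */
int g(int x, int y) {
    x = x + 1;    /* x becomes 4 */
    y = y + 2;    /* y becomes 5 */
    return x + y; /* 9 */
}

int main(void) {
    int i = 3;
    printf("Returned: %d, i is still %d\n", g(i, i), i); /* Returned: 9, i is still 3 */
    return 0;
}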

Using Thrust library of CUDA for large values

Hi, I wanted to implement an extremely large loop in Thrust, but I find it much slower than normal C++ code. Can you please tell me where I am going wrong?
fi and fj are host vectors.
xsize is usually around a 7-8 digit number.
thrust::host_vector<double> df((2*floor(r)*(floor(r)+1)+1)*n*n);
thrust::device_vector<double> gpu_df((2*floor(r)*(floor(r)+1)+1)*n*n);
for (i = 0; i < xsize; i++)
{
    gpu_df[i] = (fi[i]-fj[i]);
    if (gpu_df[i] < 0)
        gpu_df[i] = 0;
    else
        gpu_df[i] = gpu_df[i]*(fi[i]-fj[i]);
    if (gpu_df[i] > 255)
        gpu_df[i] = 255;
    // cout<<fi[i]<<"\n";
}
df = gpu_df;
I feel the code is not being parallelized. Could you please help me out?
To run programs on the GPU with Thrust you need to write them in terms of Thrust algorithms like reduce, transform, sort, etc. In this case we can write the computation in terms of transform, since the loop just computes a function F(fi[i], fj[i]) and stores the result in df[i]. Note that every gpu_df[i] access in your loop touches a single device element from host code, which costs a separate host-device transfer each time; that is the main reason the loop is so slow. Note also that we must first move the input arrays to the device before calling transform, because Thrust requires the input and output arrays to live in the same place.
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <thrust/copy.h>
#include <cstdio>

struct my_functor
    : public thrust::binary_function<float,float,float>
{
    __host__ __device__
    float operator()(float fi, float fj)
    {
        float d = fi - fj;
        if (d < 0)
            d = 0;
        else
            d = d * d;
        if (d > 255)
            d = 255;
        return d;
    }
};

int main(void)
{
    size_t N = 5;
    // allocate storage on host
    thrust::host_vector<float> cpu_fi(N);
    thrust::host_vector<float> cpu_fj(N);
    thrust::host_vector<float> cpu_df(N);
    // initialize fi and fj arrays
    cpu_fi[0] = 2.0; cpu_fj[0] = 0.0;
    cpu_fi[1] = 0.0; cpu_fj[1] = 2.0;
    cpu_fi[2] = 3.0; cpu_fj[2] = 1.0;
    cpu_fi[3] = 4.0; cpu_fj[3] = 5.0;
    cpu_fi[4] = 8.0; cpu_fj[4] = -8.0;
    // copy fi and fj to device
    thrust::device_vector<float> gpu_fi = cpu_fi;
    thrust::device_vector<float> gpu_fj = cpu_fj;
    // allocate storage for df
    thrust::device_vector<float> gpu_df(N);
    // perform transformation
    thrust::transform(gpu_fi.begin(), gpu_fi.end(), // first input range
                      gpu_fj.begin(),               // second input range
                      gpu_df.begin(),               // output range
                      my_functor());                // functor to apply
    // copy results back to host
    thrust::copy(gpu_df.begin(), gpu_df.end(), cpu_df.begin());
    // print results on host
    for (size_t i = 0; i < N; i++)
        printf("f(%2.0lf,%2.0lf) = %3.0lf\n", cpu_fi[i], cpu_fj[i], cpu_df[i]);
    return 0;
}
For reference, here's the output of the program:
f( 2, 0) = 4
f( 0, 2) = 0
f( 3, 1) = 4
f( 4, 5) = 0
f( 8,-8) = 255
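Incidentally (my note, not part of the original answer): with a newer CUDA toolchain, compiling with nvcc --extended-lambda lets a device lambda replace the named functor:

// Same transformation, expressed as a device lambda instead of my_functor.
thrust::transform(gpu_fi.begin(), gpu_fi.end(), gpu_fj.begin(), gpu_df.begin(),
                  [] __device__ (float fi, float fj) {
                      float d = fi - fj;
                      d = (d < 0) ? 0.0f : d * d;
                      return (d > 255) ? 255.0f : d;
                  });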