hip_filename: string, length 5 to 84
hip_content: string, length 79 to 9.69M
cuda_filename: string, length 4 to 83
cuda_content: string, length 19 to 9.69M
71cd84215b08eb837dfc897e434275f7d478aea9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #define ANCHOR 0 #define POSITIVE 1 #define NEGATIVE 2 __global__ void triplet_dist_kernel(const int n, const float norm, const int nb_batch, const int length, float* x, float* y) { for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < n; i += blockDim.x*gridDim.x) { int row = i / nb_batch; int col = i % nb_batch; float *xrow = x + row*length; float *xcol = x + col*length; float sum = 0.0f; for (int j = 0; j < length; j++) { if (norm == 1.0f) { sum += fabsf(xrow[j] - xcol[j]); } else { sum += powf(xrow[j] - xcol[j], norm); } } if (norm == 1.0f) { y[row*nb_batch + col] = sum; } else { y[row*nb_batch + col] = powf(sum, 1.0/norm); } } } __global__ void triplet_loss_semi_kernel(const int n, const int nb_batch, const int length, const float alpha, const float* x, const float* d, const float* l, float* y, float* z) { for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < n; i += blockDim.x*gridDim.x) { // find anchor embedding int idx_a = i; // find positive embedding int idx_p = i; float val_p = 0.0f; for (int j = 0; j < nb_batch; j++) { if ((l[j] == l[i]) & (val_p < d[i*nb_batch + j])) { idx_p = j; val_p = d[i*nb_batch + j]; } } // find negative embedding int idx_n = i; float val_n = FLT_MAX; for (int j = 0; j < nb_batch; j++) { if ((l[j] != l[i]) & (val_p < d[i*nb_batch + j]) & (val_n > d[i*nb_batch + j])) { idx_n = j; val_n = d[i*nb_batch + j]; } } // keep track of embedding indices y[i*3 + ANCHOR] = idx_a; y[i*3 + POSITIVE] = idx_p; y[i*3 + NEGATIVE] = idx_n; // loss = max((a - p)^2 - (a - n)^2 + alpha, 0) float sum = 0.0f; for (int j = 0; j < length; j++) sum += powf(x[idx_a*length + j] - x[idx_p*length + j], 2); for (int j = 0; j < length; j++) sum -= powf(x[idx_a*length + j] - x[idx_n*length + j], 2); z[i] = fmaxf(sum + alpha, 0.0f); } } __global__ void triplet_loss_semi_allpairs_kernel(const int n, const int nb_batch, const int length, const int nb_blocks, const int samples, const float alpha, const float* x, const float* d, const float* l, float* y, float* z) { for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < n; i += blockDim.x*gridDim.x) { // pick each element from positive diagonal block in distance matrix int row = i % (nb_blocks*samples); int col = i / (nb_blocks*samples) + (row / samples)*samples; int dst = (i / (nb_blocks*samples))*nb_batch + i % (nb_blocks*samples); if (col >= row) col++; // find anchor embedding int idx_a = row; // find positive embedding int idx_p = col; float val_p = d[idx_a*nb_batch + idx_p]; // find negative embedding int idx_n = idx_a; float val_n = FLT_MAX; for (int j = nb_blocks*samples; j < nb_batch; j++) { if ((l[j] != l[idx_a]) & (val_p < d[idx_a*nb_batch + j]) & (val_n > d[idx_a*nb_batch + j])) { idx_n = j; val_n = d[idx_a*nb_batch + j]; } } // keep track of embedding indices y[dst*3 + ANCHOR] = idx_a; y[dst*3 + POSITIVE] = idx_p; y[dst*3 + NEGATIVE] = idx_n; // loss = max((a - p)^2 - (a - n)^2 + alpha, 0) float sum = 0.0f; for (int j = 0; j < length; j++) sum += powf(x[idx_a*length + j] - x[idx_p*length + j], 2); for (int j = 0; j < length; j++) sum -= powf(x[idx_a*length + j] - x[idx_n*length + j], 2); z[dst] = fmaxf(sum + alpha, 0.0f); } } static int triplet_TripletCriterion_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *label = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); float norm = luaT_getfieldchecknumber(L, 1, 
"norm"); float alpha = luaT_getfieldchecknumber(L, 1, "alpha"); int samples = luaT_getfieldchecknumber(L, 1, "samples"); int nb_blocks = luaT_getfieldchecknumber(L, 1, "blocks"); THCudaTensor *dist = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "dist", "torch.CudaTensor"); THCudaTensor *emb = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "embeddings", "torch.CudaTensor"); THCudaTensor *loss = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "loss", "torch.CudaTensor"); long nb_batch = input->size[0]; long length = input->size[1]; // prepare place holder input = THCudaTensor_newContiguous(state, input); THCudaTensor_resize2d(state, dist, nb_batch, nb_batch); if (samples == 1) { THCudaTensor_resize2d(state, emb, nb_batch, 3); THCudaTensor_resize1d(state, loss, nb_batch); } else { THCudaTensor_resize2d(state, emb, nb_batch*(samples-1), 3); THCudaTensor_resize1d(state, loss, nb_batch*(samples-1)); THCudaTensor_zero(state, emb); THCudaTensor_zero(state, loss); } // queue kernel (dist matrix) long num_threads = nb_batch*nb_batch; hipLaunchKernelGGL(( triplet_dist_kernel) , dim3(GET_BLOCKS(num_threads)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), num_threads, norm, nb_batch, length, THCudaTensor_data(state, input), THCudaTensor_data(state, dist) ); // queue kernel (find embeddings) if (samples > 1) { num_threads = nb_blocks*samples*(samples-1); hipLaunchKernelGGL(( triplet_loss_semi_allpairs_kernel) , dim3(GET_BLOCKS(num_threads)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), num_threads, nb_batch, length, nb_blocks, samples, alpha, THCudaTensor_data(state, input), THCudaTensor_data(state, dist), THCudaTensor_data(state, label), THCudaTensor_data(state, emb), THCudaTensor_data(state, loss) ); } else { num_threads = nb_batch; hipLaunchKernelGGL(( triplet_loss_semi_kernel) , dim3(GET_BLOCKS(num_threads)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), num_threads, nb_batch, length, alpha, THCudaTensor_data(state, input), THCudaTensor_data(state, dist), THCudaTensor_data(state, label), THCudaTensor_data(state, emb), THCudaTensor_data(state, loss) ); } // close THCudaTensor_free(state, input); return 1; } __global__ void triplet_prop_kernel(const int n, const int nb_pairs, const int length, const float* x, const float* emb, const float *loss, float* y) { for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < n; i += blockDim.x*gridDim.x) { int row = i / length; int col = i % length; if (loss[row] > 0) { y[i] = (x[((int)emb[3*row+2])*length + col] - x[((int)emb[3*row+1])*length + col])*2.0/((float)nb_pairs); } else { y[i] = 0; } } } static int triplet_TripletCriterion_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *emb = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "embeddings", "torch.CudaTensor"); THCudaTensor *loss = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "loss", "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); long nb_pairs = loss->size[0]; long length = input->size[1]; THCudaTensor_resize2d(state, gradInput, nb_pairs, length); // queue kernel long num_threads = nb_pairs*length; hipLaunchKernelGGL(( triplet_prop_kernel) , dim3(GET_BLOCKS(num_threads)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), num_threads, nb_pairs, length, THCudaTensor_data(state, input), THCudaTensor_data(state, emb), THCudaTensor_data(state, loss), THCudaTensor_data(state, 
gradInput) ); return 1; } static const struct luaL_Reg triplet_TripletCriterion__ [] = { {"TripletCriterion_updateOutput", triplet_TripletCriterion_updateOutput}, {"TripletCriterion_updateGradInput", triplet_TripletCriterion_updateGradInput}, {NULL, NULL} }; static void triplet_TripletCriterion_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, triplet_TripletCriterion__, "nn"); lua_pop(L,1); }
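The pairwise-distance step above is easy to sanity-check on the host. Below is a minimal CPU reference sketch, assuming a row-major nb_batch x length embedding matrix; the sizes in main are made up for illustration. It mirrors triplet_dist_kernel as written, including applying the power directly to the (possibly negative) difference when norm != 1.

#include <cmath>
#include <cstdio>
#include <vector>

// Host mirror of triplet_dist_kernel: d[row*nb_batch + col] is the L-p
// distance between embeddings `row` and `col` of a row-major
// nb_batch x length matrix. Like the kernel, it does not take the absolute
// value before the power for norms other than 1.
void triplet_dist_cpu(int nb_batch, int length, float norm,
                      const float* x, float* d) {
    for (int row = 0; row < nb_batch; ++row) {
        for (int col = 0; col < nb_batch; ++col) {
            float sum = 0.0f;
            for (int j = 0; j < length; ++j) {
                float diff = x[row * length + j] - x[col * length + j];
                sum += (norm == 1.0f) ? std::fabs(diff) : std::pow(diff, norm);
            }
            d[row * nb_batch + col] = (norm == 1.0f) ? sum : std::pow(sum, 1.0f / norm);
        }
    }
}

int main() {
    const int nb_batch = 4, length = 8;   // hypothetical test sizes
    std::vector<float> x(nb_batch * length), d(nb_batch * nb_batch);
    for (size_t i = 0; i < x.size(); ++i) x[i] = 0.01f * (float)i;
    triplet_dist_cpu(nb_batch, length, 2.0f, x.data(), d.data());
    printf("d(0,1) = %f\n", d[0 * nb_batch + 1]);
    return 0;
}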
71cd84215b08eb837dfc897e434275f7d478aea9.cu
#include "common.h" #define ANCHOR 0 #define POSITIVE 1 #define NEGATIVE 2 __global__ void triplet_dist_kernel(const int n, const float norm, const int nb_batch, const int length, float* x, float* y) { for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < n; i += blockDim.x*gridDim.x) { int row = i / nb_batch; int col = i % nb_batch; float *xrow = x + row*length; float *xcol = x + col*length; float sum = 0.0f; for (int j = 0; j < length; j++) { if (norm == 1.0f) { sum += fabsf(xrow[j] - xcol[j]); } else { sum += powf(xrow[j] - xcol[j], norm); } } if (norm == 1.0f) { y[row*nb_batch + col] = sum; } else { y[row*nb_batch + col] = powf(sum, 1.0/norm); } } } __global__ void triplet_loss_semi_kernel(const int n, const int nb_batch, const int length, const float alpha, const float* x, const float* d, const float* l, float* y, float* z) { for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < n; i += blockDim.x*gridDim.x) { // find anchor embedding int idx_a = i; // find positive embedding int idx_p = i; float val_p = 0.0f; for (int j = 0; j < nb_batch; j++) { if ((l[j] == l[i]) & (val_p < d[i*nb_batch + j])) { idx_p = j; val_p = d[i*nb_batch + j]; } } // find negative embedding int idx_n = i; float val_n = FLT_MAX; for (int j = 0; j < nb_batch; j++) { if ((l[j] != l[i]) & (val_p < d[i*nb_batch + j]) & (val_n > d[i*nb_batch + j])) { idx_n = j; val_n = d[i*nb_batch + j]; } } // keep track of embedding indices y[i*3 + ANCHOR] = idx_a; y[i*3 + POSITIVE] = idx_p; y[i*3 + NEGATIVE] = idx_n; // loss = max((a - p)^2 - (a - n)^2 + alpha, 0) float sum = 0.0f; for (int j = 0; j < length; j++) sum += powf(x[idx_a*length + j] - x[idx_p*length + j], 2); for (int j = 0; j < length; j++) sum -= powf(x[idx_a*length + j] - x[idx_n*length + j], 2); z[i] = fmaxf(sum + alpha, 0.0f); } } __global__ void triplet_loss_semi_allpairs_kernel(const int n, const int nb_batch, const int length, const int nb_blocks, const int samples, const float alpha, const float* x, const float* d, const float* l, float* y, float* z) { for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < n; i += blockDim.x*gridDim.x) { // pick each element from positive diagonal block in distance matrix int row = i % (nb_blocks*samples); int col = i / (nb_blocks*samples) + (row / samples)*samples; int dst = (i / (nb_blocks*samples))*nb_batch + i % (nb_blocks*samples); if (col >= row) col++; // find anchor embedding int idx_a = row; // find positive embedding int idx_p = col; float val_p = d[idx_a*nb_batch + idx_p]; // find negative embedding int idx_n = idx_a; float val_n = FLT_MAX; for (int j = nb_blocks*samples; j < nb_batch; j++) { if ((l[j] != l[idx_a]) & (val_p < d[idx_a*nb_batch + j]) & (val_n > d[idx_a*nb_batch + j])) { idx_n = j; val_n = d[idx_a*nb_batch + j]; } } // keep track of embedding indices y[dst*3 + ANCHOR] = idx_a; y[dst*3 + POSITIVE] = idx_p; y[dst*3 + NEGATIVE] = idx_n; // loss = max((a - p)^2 - (a - n)^2 + alpha, 0) float sum = 0.0f; for (int j = 0; j < length; j++) sum += powf(x[idx_a*length + j] - x[idx_p*length + j], 2); for (int j = 0; j < length; j++) sum -= powf(x[idx_a*length + j] - x[idx_n*length + j], 2); z[dst] = fmaxf(sum + alpha, 0.0f); } } static int triplet_TripletCriterion_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *label = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); float norm = luaT_getfieldchecknumber(L, 1, "norm"); float alpha = luaT_getfieldchecknumber(L, 1, "alpha"); int samples = 
luaT_getfieldchecknumber(L, 1, "samples"); int nb_blocks = luaT_getfieldchecknumber(L, 1, "blocks"); THCudaTensor *dist = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "dist", "torch.CudaTensor"); THCudaTensor *emb = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "embeddings", "torch.CudaTensor"); THCudaTensor *loss = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "loss", "torch.CudaTensor"); long nb_batch = input->size[0]; long length = input->size[1]; // prepare place holder input = THCudaTensor_newContiguous(state, input); THCudaTensor_resize2d(state, dist, nb_batch, nb_batch); if (samples == 1) { THCudaTensor_resize2d(state, emb, nb_batch, 3); THCudaTensor_resize1d(state, loss, nb_batch); } else { THCudaTensor_resize2d(state, emb, nb_batch*(samples-1), 3); THCudaTensor_resize1d(state, loss, nb_batch*(samples-1)); THCudaTensor_zero(state, emb); THCudaTensor_zero(state, loss); } // queue kernel (dist matrix) long num_threads = nb_batch*nb_batch; triplet_dist_kernel <<<GET_BLOCKS(num_threads), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>> ( num_threads, norm, nb_batch, length, THCudaTensor_data(state, input), THCudaTensor_data(state, dist) ); // queue kernel (find embeddings) if (samples > 1) { num_threads = nb_blocks*samples*(samples-1); triplet_loss_semi_allpairs_kernel <<<GET_BLOCKS(num_threads), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>> ( num_threads, nb_batch, length, nb_blocks, samples, alpha, THCudaTensor_data(state, input), THCudaTensor_data(state, dist), THCudaTensor_data(state, label), THCudaTensor_data(state, emb), THCudaTensor_data(state, loss) ); } else { num_threads = nb_batch; triplet_loss_semi_kernel <<<GET_BLOCKS(num_threads), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>> ( num_threads, nb_batch, length, alpha, THCudaTensor_data(state, input), THCudaTensor_data(state, dist), THCudaTensor_data(state, label), THCudaTensor_data(state, emb), THCudaTensor_data(state, loss) ); } // close THCudaTensor_free(state, input); return 1; } __global__ void triplet_prop_kernel(const int n, const int nb_pairs, const int length, const float* x, const float* emb, const float *loss, float* y) { for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < n; i += blockDim.x*gridDim.x) { int row = i / length; int col = i % length; if (loss[row] > 0) { y[i] = (x[((int)emb[3*row+2])*length + col] - x[((int)emb[3*row+1])*length + col])*2.0/((float)nb_pairs); } else { y[i] = 0; } } } static int triplet_TripletCriterion_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *emb = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "embeddings", "torch.CudaTensor"); THCudaTensor *loss = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "loss", "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); long nb_pairs = loss->size[0]; long length = input->size[1]; THCudaTensor_resize2d(state, gradInput, nb_pairs, length); // queue kernel long num_threads = nb_pairs*length; triplet_prop_kernel <<<GET_BLOCKS(num_threads), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>> ( num_threads, nb_pairs, length, THCudaTensor_data(state, input), THCudaTensor_data(state, emb), THCudaTensor_data(state, loss), THCudaTensor_data(state, gradInput) ); return 1; } static const struct luaL_Reg triplet_TripletCriterion__ [] = { {"TripletCriterion_updateOutput", triplet_TripletCriterion_updateOutput}, 
{"TripletCriterion_updateGradInput", triplet_TripletCriterion_updateGradInput}, {NULL, NULL} }; static void triplet_TripletCriterion_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, triplet_TripletCriterion__, "nn"); lua_pop(L,1); }
fe5668353935395743b52a84e3685468747dc31b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <ctime> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" //NVTX Dir: C:\Program Files\NVIDIA GPU Computing Toolkit\nvToolsExt #include <roctracer/roctx.h> //Initialize sizes const int rows = 4096; const int cols = 4096; const int BLOCK_SIZE_X = 32; const int BLOCK_SIZE_Y = 32; //For unrolled transpose const int TILE = 32; const int SIDE = 8; using namespace std; #define NAIVE_TRANSPOSE 0 #define SHARED_MEM_TRANSPOSE 0 #define BANK_CONF_TRANSPOSE 0 #define UNROLLED_TRANSPOSE 0 struct DIMS { dim3 dimBlock; dim3 dimGrid; }; #define CUDA(call) do { \ hipError_t e = (call); \ if (e == hipSuccess) break; \ fprintf(stderr, __FILE__":%d: %s (%d)\n", \ __LINE__, hipGetErrorString(e), e); \ exit(1); \ } while (0) double diffclock( clock_t clock1, clock_t clock2 ) { double diffticks = clock1 - clock2; double diffms = diffticks / ( CLOCKS_PER_SEC / 1000.0); return diffms; } inline unsigned divup(unsigned n, unsigned div) { return (n + div - 1) / div; } // Check errors void postprocess(const float *ref, const float *res, int n) { bool passed = true; for (int i = 0; i < n; i++) { if (res[i] != ref[i]) { printf("ID:%d \t Res:%f \t Ref:%f\n", i, res[i], ref[i]); printf("%25s\n", "*** FAILED ***"); passed = false; break; } } if(passed) printf("Post process check passed!!\n"); } void preprocess(float *res, float *dev_res, int n) { for (int i = 0; i < n; i++) { res[i] = -1; } hipMemset(dev_res, -1, n * sizeof(float)); } __global__ void copyKernel(const float* __restrict__ const a, float* __restrict__ const b) { int i = blockIdx.y * blockDim.y + threadIdx.y; // row int j = blockIdx.x * blockDim.x + threadIdx.x; // col int index_in = i*cols+j; // (i,j) from matrix A b[index_in] = a[index_in]; } __global__ void matrixTransposeNaive(const float* __restrict__ const a, float* __restrict__ const b) { //HINT: Look at copyKernel above int i = 0; // Compute row int j = 0; // Compute col int index_in = 0; // Compute input index (i,j) from matrix A int index_out = 0; // Compute output index (j,i) in matrix B = transpose(A) // Copy data from A to B } __global__ void matrixTransposeShared(const float* __restrict__ const a, float* __restrict__ const b) { //Allocate appropriate shared memory //Compute input and output index //Copy data from input to shared memory //Copy data from shared memory to global memory } __global__ void matrixTransposeSharedwBC(const float* __restrict__ const a, float* __restrict__ const b) { //HINT: Copy code from matrixTransposeShared kernel, while solving bank conflict problem //Allocate appropriate shared memory //Compute input and output index //Copy data from input to shared memory //Copy data from shared memory to global memory } __global__ void matrixTransposeUnrolled(const float* __restrict__ const a, float* __restrict__ const b) { //Allocate appropriate shared memory //Compute input and output index //Copy data from input to shared memory. Multiple copies per thread. //Copy data from shared memory to global memory. Multiple copies per thread. } int main(int argc, char *argv[]) { //Run Memcpy benchmarks nvtxRangeId_t cudaBenchmark = roctxRangeStart("CUDA Memcpy Benchmark"); #if defined WIN64 system(".\\..\\bin\\cudaBenchmark.exe"); #elif defined LINUX system("./bin/cudaBenchmark"); #endif roctxRangeStop(cudaBenchmark); // Host arrays. 
float* a = new float[rows*cols]; float* b = new float[rows*cols]; float* a_gold = new float[rows*cols]; float* b_gold = new float[rows*cols]; // Device arrays float *d_a, *d_b; // Allocate memory on the device CUDA( hipMalloc((void **) &d_a, rows*cols*sizeof(float)) ); CUDA( hipMalloc((void **) &d_b, rows*cols*sizeof(float)) ); // Fill matrix A for (int i = 0; i < rows * cols; i++) a[i] = (float)i; cout << endl; // Copy array contents of A from the host (CPU) to the device (GPU) hipMemcpy(d_a, a, rows*cols*sizeof(float), hipMemcpyHostToDevice); //Compute "gold" reference standard for(int ii = 0; ii < rows; ii++) { for(int jj = 0; jj < cols; jj++) { a_gold[jj * rows + ii] = a[jj * cols + ii]; b_gold[ii * cols + jj] = a[jj * cols + ii]; } } hipDeviceSynchronize(); // Create CUDA events for timing hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); cout << "***Launch the transpose!***" << endl << endl; #define CPU_TRANSPOSE #ifdef CPU_TRANSPOSE //////////////////////////////////////////////////////////// cout << "******************************************" << endl; cout << "***CPU Transpose***" << endl; { // start the timer nvtxRangeId_t cpuBenchmark = roctxRangeStart("CPU Transpose Benchmark"); clock_t begin = clock(); int iters = 10; for (int k=0; k<iters; k++) { for(int ii = 0; ii < rows; ii++) for(int jj = 0; jj < cols; jj++) b[ii * cols + jj] = a[jj * cols + ii]; } // stop the timer clock_t end = clock(); roctxRangeStop(cpuBenchmark); float time = 0.0f; time = diffclock(end, begin); // print out the time required for the kernel to finish the transpose operation double Bandwidth = (double)iters*2.0*1000.0*(double)(rows*cols*sizeof(float)) / (1000.0*1000.0*1000.0*time); cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl; cout << "Bandwidth (GB/s) = " << Bandwidth << endl; } cout << "******************************************" << endl; cout << endl; //////////////////////////////////////////////////////////// #endif //////////////////////////////////////////////////////////// cout << "******************************************" << endl; cout << "***Device To Device Copy***" << endl; { preprocess(b, d_b, rows*cols); // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within // Calculate number of blocks along X and Y in a 2D CUDA "grid" DIMS dims; dims.dimBlock = dim3(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1); dims.dimGrid = dim3(divup(rows, BLOCK_SIZE_X), divup(cols, BLOCK_SIZE_Y), 1 ); // start the timer nvtxRangeId_t naiveBenchmark = roctxRangeStart("Device to Device Copy"); hipEventRecord( start, 0); int iters = 100; for (int i=0; i<iters; i++) { // Launch the GPU kernel hipLaunchKernelGGL(( copyKernel), dim3(dims.dimGrid), dim3(dims.dimBlock), 0, 0, d_a, d_b); } // stop the timer hipEventRecord( stop, 0); hipEventSynchronize( stop ); roctxRangeStop(naiveBenchmark); float time = 0.0f; hipEventElapsedTime( &time, start, stop); // print out the time required for the kernel to finish the transpose operation double Bandwidth = (double)iters*2.0*1000.0*(double)(rows*cols*sizeof(float)) / (1000.0*1000.0*1000.0*time); //3.0 for read of A and read and write of B cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl; cout << "Bandwidth (GB/s) = " << Bandwidth << endl; // copy the answer back to the host (CPU) from the device (GPU) hipMemcpy(b, d_b, cols*rows*sizeof(float), hipMemcpyDeviceToHost); postprocess(a_gold, b, rows * cols); } cout << "******************************************" << endl; cout << endl; 
//////////////////////////////////////////////////////////// #if NAIVE_TRANSPOSE //////////////////////////////////////////////////////////// cout << "******************************************" << endl; cout << "***Naive Transpose***" << endl; { preprocess(b, d_b, rows*cols); // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within // Calculate number of blocks along X and Y in a 2D CUDA "grid" // HINT: Look above for copy kernel dims computation DIMS dims; dims.dimBlock = dim3(1, 1, 1); dims.dimGrid = dim3(1, 1, 1 ); // start the timer nvtxRangeId_t naiveBenchmark = roctxRangeStart("Naive Transpose Benchmark"); hipEventRecord( start, 0); int iters = 100; for (int i=0; i<iters; i++) { // Launch the GPU kernel hipLaunchKernelGGL(( matrixTransposeNaive), dim3(dims.dimGrid), dim3(dims.dimBlock), 0, 0, d_a, d_b); } // stop the timer hipEventRecord( stop, 0); hipEventSynchronize( stop ); roctxRangeStop(naiveBenchmark); float time = 0.0f; hipEventElapsedTime( &time, start, stop); // print out the time required for the kernel to finish the transpose operation double Bandwidth = (double)iters*2.0*1000.0*(double)(rows*cols*sizeof(float)) / (1000.0*1000.0*1000.0*time); cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl; cout << "Bandwidth (GB/s) = " << Bandwidth << endl; // copy the answer back to the host (CPU) from the device (GPU) hipMemcpy(b, d_b, cols*rows*sizeof(float), hipMemcpyDeviceToHost); postprocess(b_gold, b, rows * cols); } cout << "******************************************" << endl; cout << endl; //////////////////////////////////////////////////////////// #endif #if SHARED_MEM_TRANSPOSE //////////////////////////////////////////////////////////// cout << "******************************************" << endl; cout << "***Shared Memory Transpose***" << endl; { preprocess(b, d_b, rows*cols); // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within // Calculate number of blocks along X and Y in a 2D CUDA "grid" DIMS dims; dims.dimBlock = dim3(1, 1, 1); dims.dimGrid = dim3(1, 1, 1 ); // start the timer nvtxRangeId_t sharedMemBenchmark = roctxRangeStart("Shared Memory Transpose Benchmark"); hipEventRecord( start, 0); int iters = 100; for (int i=0; i<iters; i++) { // Launch the GPU kernel hipLaunchKernelGGL(( matrixTransposeShared), dim3(dims.dimGrid), dim3(dims.dimBlock), 0, 0, d_a, d_b); } // stop the timer hipEventRecord( stop, 0); hipEventSynchronize( stop ); roctxRangeStop(sharedMemBenchmark); float time = 0.0f; hipEventElapsedTime( &time, start, stop); // print out the time required for the kernel to finish the transpose operation double Bandwidth = (double)iters*2.0*1000.0*(double)(rows*cols*sizeof(float)) / (1000.0*1000.0*1000.0*time); cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl; cout << "Bandwidth (GB/s) = " << Bandwidth << endl; // copy the answer back to the host (CPU) from the device (GPU) hipMemcpy(b, d_b, cols*rows*sizeof(float), hipMemcpyDeviceToHost); postprocess(b_gold, b, rows * cols); } cout << "******************************************" << endl; cout << endl; //////////////////////////////////////////////////////////// #endif #if BANK_CONF_TRANSPOSE //////////////////////////////////////////////////////////// cout << "******************************************" << endl; cout << "***Without Bank Conflicts Transpose***" << endl; { preprocess(b, d_b, rows*cols); // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within // Calculate number of blocks along X and Y in a 2D CUDA "grid" DIMS 
dims; dims.dimBlock = dim3(1, 1, 1); dims.dimGrid = dim3(1, 1, 1 ); // start the timer nvtxRangeId_t sharedMemBenchmark = roctxRangeStart("Shared Memory Transpose Benchmark"); hipEventRecord( start, 0); int iters = 100; for (int i=0; i<iters; i++) { // Launch the GPU kernel hipLaunchKernelGGL(( matrixTransposeSharedwBC), dim3(dims.dimGrid), dim3(dims.dimBlock), 0, 0, d_a, d_b); } // stop the timer hipEventRecord( stop, 0); hipEventSynchronize( stop ); roctxRangeStop(sharedMemBenchmark); float time = 0.0f; hipEventElapsedTime( &time, start, stop); // print out the time required for the kernel to finish the transpose operation double Bandwidth = (double)iters*2.0*1000.0*(double)(rows*cols*sizeof(float)) / (1000.0*1000.0*1000.0*time); cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl; cout << "Bandwidth (GB/s) = " << Bandwidth << endl; // copy the answer back to the host (CPU) from the device (GPU) hipMemcpy(b, d_b, cols*rows*sizeof(float), hipMemcpyDeviceToHost); postprocess(b_gold, b, rows * cols); } cout << "******************************************" << endl; cout << endl; //////////////////////////////////////////////////////////// #endif #if UNROLLED_TRANSPOSE //////////////////////////////////////////////////////////// cout << "******************************************" << endl; cout << "***Unrolled Loops Transpose***" << endl; { preprocess(b, d_b, rows*cols); // Assign a 2D distribution of TILE x SIDE x 1 CUDA threads within // Calculate number of blocks along X and Y in a 2D CUDA "grid" DIMS dims; dims.dimBlock = dim3(1, 1, 1); dims.dimGrid = dim3(1, 1, 1 ); // start the timer nvtxRangeId_t unrolledBenchmark = roctxRangeStart("Shared Memory Transpose Benchmark"); hipEventRecord( start, 0); int iters = 100; for (int i=0; i<iters; i++) { // Launch the GPU kernel hipLaunchKernelGGL(( matrixTransposeUnrolled), dim3(dims.dimGrid), dim3(dims.dimBlock), 0, 0, d_a, d_b); } // stop the timer hipEventRecord( stop, 0); hipEventSynchronize( stop ); roctxRangeStop(unrolledBenchmark); float time = 0.0f; hipEventElapsedTime( &time, start, stop); // print out the time required for the kernel to finish the transpose operation double Bandwidth = (double)iters*2.0*1000.0*(double)(rows*cols*sizeof(float)) / (1000.0*1000.0*1000.0*time); cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl; cout << "Bandwidth (GB/s) = " << Bandwidth << endl; // copy the answer back to the host (CPU) from the device (GPU) hipMemcpy(b, d_b, cols*rows*sizeof(float), hipMemcpyDeviceToHost); postprocess(b_gold, b, rows * cols); } cout << "******************************************" << endl; cout << endl; //////////////////////////////////////////////////////////// #endif // copy the answer back to the host (CPU) from the device (GPU) /* cout << "Entries of B: \n"; for (int i = 0; i < 32; i++) { cout << b[i] << " "; } cout << endl; for (int i = 0; i < 32; i++) { cout << b[i * cols] << " "; } cout << endl; */ // free device memory hipFree(d_a); hipFree(d_b); // free host memory delete[] a; delete[] b; //Destroy Events CUDA(hipEventDestroy(start)); CUDA(hipEventDestroy(stop)); //CUDA Reset for NVProf CUDA(hipDeviceReset()); // successful program termination return 0; }
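matrixTransposeNaive above is left as an exercise stub. One possible way to fill it in is sketched below as a self-contained plain-CUDA program with its own small matrix size (the hipified twin would differ only in the launch syntax); this is an illustrative completion under those assumptions, not the exercise's reference solution.

#include <cstdio>
#include <cuda_runtime.h>

// Illustrative sizes, not the 4096 x 4096 used by the exercise.
const int ROWS = 64;
const int COLS = 64;

// One possible body for the matrixTransposeNaive stub: each thread reads
// element (i, j) of A and writes it to (j, i) of B.
__global__ void transposeNaive(const float* a, float* b) {
    int i = blockIdx.y * blockDim.y + threadIdx.y;   // row in A
    int j = blockIdx.x * blockDim.x + threadIdx.x;   // col in A
    if (i < ROWS && j < COLS)
        b[j * ROWS + i] = a[i * COLS + j];           // B = transpose(A)
}

int main() {
    const int n = ROWS * COLS;
    float *h_a = new float[n], *h_b = new float[n];
    for (int i = 0; i < n; ++i) h_a[i] = (float)i;

    float *d_a, *d_b;
    cudaMalloc((void**)&d_a, n * sizeof(float));
    cudaMalloc((void**)&d_b, n * sizeof(float));
    cudaMemcpy(d_a, h_a, n * sizeof(float), cudaMemcpyHostToDevice);

    dim3 block(32, 32);
    dim3 grid((COLS + block.x - 1) / block.x, (ROWS + block.y - 1) / block.y);
    transposeNaive<<<grid, block>>>(d_a, d_b);
    cudaMemcpy(h_b, d_b, n * sizeof(float), cudaMemcpyDeviceToHost);

    bool ok = true;
    for (int i = 0; i < ROWS && ok; ++i)
        for (int j = 0; j < COLS && ok; ++j)
            ok = (h_b[j * ROWS + i] == h_a[i * COLS + j]);
    printf("%s\n", ok ? "transpose check passed" : "transpose check FAILED");

    cudaFree(d_a); cudaFree(d_b);
    delete[] h_a; delete[] h_b;
    return 0;
}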
fe5668353935395743b52a84e3685468747dc31b.cu
#include <stdio.h> #include <iostream> #include <ctime> #include "cuda_runtime.h" #include "device_launch_parameters.h" //NVTX Dir: C:\Program Files\NVIDIA GPU Computing Toolkit\nvToolsExt #include <nvToolsExt.h> //Initialize sizes const int rows = 4096; const int cols = 4096; const int BLOCK_SIZE_X = 32; const int BLOCK_SIZE_Y = 32; //For unrolled transpose const int TILE = 32; const int SIDE = 8; using namespace std; #define NAIVE_TRANSPOSE 0 #define SHARED_MEM_TRANSPOSE 0 #define BANK_CONF_TRANSPOSE 0 #define UNROLLED_TRANSPOSE 0 struct DIMS { dim3 dimBlock; dim3 dimGrid; }; #define CUDA(call) do { \ cudaError_t e = (call); \ if (e == cudaSuccess) break; \ fprintf(stderr, __FILE__":%d: %s (%d)\n", \ __LINE__, cudaGetErrorString(e), e); \ exit(1); \ } while (0) double diffclock( clock_t clock1, clock_t clock2 ) { double diffticks = clock1 - clock2; double diffms = diffticks / ( CLOCKS_PER_SEC / 1000.0); return diffms; } inline unsigned divup(unsigned n, unsigned div) { return (n + div - 1) / div; } // Check errors void postprocess(const float *ref, const float *res, int n) { bool passed = true; for (int i = 0; i < n; i++) { if (res[i] != ref[i]) { printf("ID:%d \t Res:%f \t Ref:%f\n", i, res[i], ref[i]); printf("%25s\n", "*** FAILED ***"); passed = false; break; } } if(passed) printf("Post process check passed!!\n"); } void preprocess(float *res, float *dev_res, int n) { for (int i = 0; i < n; i++) { res[i] = -1; } cudaMemset(dev_res, -1, n * sizeof(float)); } __global__ void copyKernel(const float* __restrict__ const a, float* __restrict__ const b) { int i = blockIdx.y * blockDim.y + threadIdx.y; // row int j = blockIdx.x * blockDim.x + threadIdx.x; // col int index_in = i*cols+j; // (i,j) from matrix A b[index_in] = a[index_in]; } __global__ void matrixTransposeNaive(const float* __restrict__ const a, float* __restrict__ const b) { //HINT: Look at copyKernel above int i = 0; // Compute row int j = 0; // Compute col int index_in = 0; // Compute input index (i,j) from matrix A int index_out = 0; // Compute output index (j,i) in matrix B = transpose(A) // Copy data from A to B } __global__ void matrixTransposeShared(const float* __restrict__ const a, float* __restrict__ const b) { //Allocate appropriate shared memory //Compute input and output index //Copy data from input to shared memory //Copy data from shared memory to global memory } __global__ void matrixTransposeSharedwBC(const float* __restrict__ const a, float* __restrict__ const b) { //HINT: Copy code from matrixTransposeShared kernel, while solving bank conflict problem //Allocate appropriate shared memory //Compute input and output index //Copy data from input to shared memory //Copy data from shared memory to global memory } __global__ void matrixTransposeUnrolled(const float* __restrict__ const a, float* __restrict__ const b) { //Allocate appropriate shared memory //Compute input and output index //Copy data from input to shared memory. Multiple copies per thread. //Copy data from shared memory to global memory. Multiple copies per thread. } int main(int argc, char *argv[]) { //Run Memcpy benchmarks nvtxRangeId_t cudaBenchmark = nvtxRangeStart("CUDA Memcpy Benchmark"); #if defined WIN64 system(".\\..\\bin\\cudaBenchmark.exe"); #elif defined LINUX system("./bin/cudaBenchmark"); #endif nvtxRangeEnd(cudaBenchmark); // Host arrays. 
float* a = new float[rows*cols]; float* b = new float[rows*cols]; float* a_gold = new float[rows*cols]; float* b_gold = new float[rows*cols]; // Device arrays float *d_a, *d_b; // Allocate memory on the device CUDA( cudaMalloc((void **) &d_a, rows*cols*sizeof(float)) ); CUDA( cudaMalloc((void **) &d_b, rows*cols*sizeof(float)) ); // Fill matrix A for (int i = 0; i < rows * cols; i++) a[i] = (float)i; cout << endl; // Copy array contents of A from the host (CPU) to the device (GPU) cudaMemcpy(d_a, a, rows*cols*sizeof(float), cudaMemcpyHostToDevice); //Compute "gold" reference standard for(int ii = 0; ii < rows; ii++) { for(int jj = 0; jj < cols; jj++) { a_gold[jj * rows + ii] = a[jj * cols + ii]; b_gold[ii * cols + jj] = a[jj * cols + ii]; } } cudaDeviceSynchronize(); // Create CUDA events for timing cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cout << "***Launch the transpose!***" << endl << endl; #define CPU_TRANSPOSE #ifdef CPU_TRANSPOSE //////////////////////////////////////////////////////////// cout << "******************************************" << endl; cout << "***CPU Transpose***" << endl; { // start the timer nvtxRangeId_t cpuBenchmark = nvtxRangeStart("CPU Transpose Benchmark"); clock_t begin = clock(); int iters = 10; for (int k=0; k<iters; k++) { for(int ii = 0; ii < rows; ii++) for(int jj = 0; jj < cols; jj++) b[ii * cols + jj] = a[jj * cols + ii]; } // stop the timer clock_t end = clock(); nvtxRangeEnd(cpuBenchmark); float time = 0.0f; time = diffclock(end, begin); // print out the time required for the kernel to finish the transpose operation double Bandwidth = (double)iters*2.0*1000.0*(double)(rows*cols*sizeof(float)) / (1000.0*1000.0*1000.0*time); cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl; cout << "Bandwidth (GB/s) = " << Bandwidth << endl; } cout << "******************************************" << endl; cout << endl; //////////////////////////////////////////////////////////// #endif //////////////////////////////////////////////////////////// cout << "******************************************" << endl; cout << "***Device To Device Copy***" << endl; { preprocess(b, d_b, rows*cols); // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within // Calculate number of blocks along X and Y in a 2D CUDA "grid" DIMS dims; dims.dimBlock = dim3(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1); dims.dimGrid = dim3(divup(rows, BLOCK_SIZE_X), divup(cols, BLOCK_SIZE_Y), 1 ); // start the timer nvtxRangeId_t naiveBenchmark = nvtxRangeStart("Device to Device Copy"); cudaEventRecord( start, 0); int iters = 100; for (int i=0; i<iters; i++) { // Launch the GPU kernel copyKernel<<<dims.dimGrid, dims.dimBlock>>>(d_a, d_b); } // stop the timer cudaEventRecord( stop, 0); cudaEventSynchronize( stop ); nvtxRangeEnd(naiveBenchmark); float time = 0.0f; cudaEventElapsedTime( &time, start, stop); // print out the time required for the kernel to finish the transpose operation double Bandwidth = (double)iters*2.0*1000.0*(double)(rows*cols*sizeof(float)) / (1000.0*1000.0*1000.0*time); //3.0 for read of A and read and write of B cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl; cout << "Bandwidth (GB/s) = " << Bandwidth << endl; // copy the answer back to the host (CPU) from the device (GPU) cudaMemcpy(b, d_b, cols*rows*sizeof(float), cudaMemcpyDeviceToHost); postprocess(a_gold, b, rows * cols); } cout << "******************************************" << endl; cout << endl; 
//////////////////////////////////////////////////////////// #if NAIVE_TRANSPOSE //////////////////////////////////////////////////////////// cout << "******************************************" << endl; cout << "***Naive Transpose***" << endl; { preprocess(b, d_b, rows*cols); // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within // Calculate number of blocks along X and Y in a 2D CUDA "grid" // HINT: Look above for copy kernel dims computation DIMS dims; dims.dimBlock = dim3(1, 1, 1); dims.dimGrid = dim3(1, 1, 1 ); // start the timer nvtxRangeId_t naiveBenchmark = nvtxRangeStart("Naive Transpose Benchmark"); cudaEventRecord( start, 0); int iters = 100; for (int i=0; i<iters; i++) { // Launch the GPU kernel matrixTransposeNaive<<<dims.dimGrid, dims.dimBlock>>>(d_a, d_b); } // stop the timer cudaEventRecord( stop, 0); cudaEventSynchronize( stop ); nvtxRangeEnd(naiveBenchmark); float time = 0.0f; cudaEventElapsedTime( &time, start, stop); // print out the time required for the kernel to finish the transpose operation double Bandwidth = (double)iters*2.0*1000.0*(double)(rows*cols*sizeof(float)) / (1000.0*1000.0*1000.0*time); cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl; cout << "Bandwidth (GB/s) = " << Bandwidth << endl; // copy the answer back to the host (CPU) from the device (GPU) cudaMemcpy(b, d_b, cols*rows*sizeof(float), cudaMemcpyDeviceToHost); postprocess(b_gold, b, rows * cols); } cout << "******************************************" << endl; cout << endl; //////////////////////////////////////////////////////////// #endif #if SHARED_MEM_TRANSPOSE //////////////////////////////////////////////////////////// cout << "******************************************" << endl; cout << "***Shared Memory Transpose***" << endl; { preprocess(b, d_b, rows*cols); // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within // Calculate number of blocks along X and Y in a 2D CUDA "grid" DIMS dims; dims.dimBlock = dim3(1, 1, 1); dims.dimGrid = dim3(1, 1, 1 ); // start the timer nvtxRangeId_t sharedMemBenchmark = nvtxRangeStart("Shared Memory Transpose Benchmark"); cudaEventRecord( start, 0); int iters = 100; for (int i=0; i<iters; i++) { // Launch the GPU kernel matrixTransposeShared<<<dims.dimGrid, dims.dimBlock>>>(d_a, d_b); } // stop the timer cudaEventRecord( stop, 0); cudaEventSynchronize( stop ); nvtxRangeEnd(sharedMemBenchmark); float time = 0.0f; cudaEventElapsedTime( &time, start, stop); // print out the time required for the kernel to finish the transpose operation double Bandwidth = (double)iters*2.0*1000.0*(double)(rows*cols*sizeof(float)) / (1000.0*1000.0*1000.0*time); cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl; cout << "Bandwidth (GB/s) = " << Bandwidth << endl; // copy the answer back to the host (CPU) from the device (GPU) cudaMemcpy(b, d_b, cols*rows*sizeof(float), cudaMemcpyDeviceToHost); postprocess(b_gold, b, rows * cols); } cout << "******************************************" << endl; cout << endl; //////////////////////////////////////////////////////////// #endif #if BANK_CONF_TRANSPOSE //////////////////////////////////////////////////////////// cout << "******************************************" << endl; cout << "***Without Bank Conflicts Transpose***" << endl; { preprocess(b, d_b, rows*cols); // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within // Calculate number of blocks along X and Y in a 2D CUDA "grid" DIMS dims; dims.dimBlock = dim3(1, 1, 1); dims.dimGrid = dim3(1, 1, 1 ); // 
start the timer nvtxRangeId_t sharedMemBenchmark = nvtxRangeStart("Shared Memory Transpose Benchmark"); cudaEventRecord( start, 0); int iters = 100; for (int i=0; i<iters; i++) { // Launch the GPU kernel matrixTransposeSharedwBC<<<dims.dimGrid, dims.dimBlock>>>(d_a, d_b); } // stop the timer cudaEventRecord( stop, 0); cudaEventSynchronize( stop ); nvtxRangeEnd(sharedMemBenchmark); float time = 0.0f; cudaEventElapsedTime( &time, start, stop); // print out the time required for the kernel to finish the transpose operation double Bandwidth = (double)iters*2.0*1000.0*(double)(rows*cols*sizeof(float)) / (1000.0*1000.0*1000.0*time); cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl; cout << "Bandwidth (GB/s) = " << Bandwidth << endl; // copy the answer back to the host (CPU) from the device (GPU) cudaMemcpy(b, d_b, cols*rows*sizeof(float), cudaMemcpyDeviceToHost); postprocess(b_gold, b, rows * cols); } cout << "******************************************" << endl; cout << endl; //////////////////////////////////////////////////////////// #endif #if UNROLLED_TRANSPOSE //////////////////////////////////////////////////////////// cout << "******************************************" << endl; cout << "***Unrolled Loops Transpose***" << endl; { preprocess(b, d_b, rows*cols); // Assign a 2D distribution of TILE x SIDE x 1 CUDA threads within // Calculate number of blocks along X and Y in a 2D CUDA "grid" DIMS dims; dims.dimBlock = dim3(1, 1, 1); dims.dimGrid = dim3(1, 1, 1 ); // start the timer nvtxRangeId_t unrolledBenchmark = nvtxRangeStart("Shared Memory Transpose Benchmark"); cudaEventRecord( start, 0); int iters = 100; for (int i=0; i<iters; i++) { // Launch the GPU kernel matrixTransposeUnrolled<<<dims.dimGrid, dims.dimBlock>>>(d_a, d_b); } // stop the timer cudaEventRecord( stop, 0); cudaEventSynchronize( stop ); nvtxRangeEnd(unrolledBenchmark); float time = 0.0f; cudaEventElapsedTime( &time, start, stop); // print out the time required for the kernel to finish the transpose operation double Bandwidth = (double)iters*2.0*1000.0*(double)(rows*cols*sizeof(float)) / (1000.0*1000.0*1000.0*time); cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl; cout << "Bandwidth (GB/s) = " << Bandwidth << endl; // copy the answer back to the host (CPU) from the device (GPU) cudaMemcpy(b, d_b, cols*rows*sizeof(float), cudaMemcpyDeviceToHost); postprocess(b_gold, b, rows * cols); } cout << "******************************************" << endl; cout << endl; //////////////////////////////////////////////////////////// #endif // copy the answer back to the host (CPU) from the device (GPU) /* cout << "Entries of B: \n"; for (int i = 0; i < 32; i++) { cout << b[i] << " "; } cout << endl; for (int i = 0; i < 32; i++) { cout << b[i * cols] << " "; } cout << endl; */ // free device memory cudaFree(d_a); cudaFree(d_b); // free host memory delete[] a; delete[] b; //Destroy Events CUDA(cudaEventDestroy(start)); CUDA(cudaEventDestroy(stop)); //CUDA Reset for NVProf CUDA(cudaDeviceReset()); // successful program termination return 0; }
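matrixTransposeShared and matrixTransposeSharedwBC are likewise stubs. The sketch below shows one common shape for them: a 32x32 tile staged through shared memory, padded to TILE+1 columns so the column-wise reads in the second phase land in different shared-memory banks (the bank-conflict fix the exercise asks for). It is a standalone illustrative program with its own small size N, not the exercise's reference solution.

#include <cstdio>
#include <cuda_runtime.h>

const int N    = 128;   // illustrative square matrix side, not the exercise's 4096
const int TILE = 32;

// Tiled transpose: a TILE x TILE block is read row-wise into shared memory,
// then written back transposed. The "+ 1" pad staggers tile rows across
// shared-memory banks, removing bank conflicts on the column-wise read.
__global__ void transposeShared(const float* a, float* b) {
    __shared__ float tile[TILE][TILE + 1];

    int x = blockIdx.x * TILE + threadIdx.x;   // col in A
    int y = blockIdx.y * TILE + threadIdx.y;   // row in A
    if (x < N && y < N)
        tile[threadIdx.y][threadIdx.x] = a[y * N + x];
    __syncthreads();

    // Swap the block indices so the write to B stays coalesced.
    int tx = blockIdx.y * TILE + threadIdx.x;  // col in B
    int ty = blockIdx.x * TILE + threadIdx.y;  // row in B
    if (tx < N && ty < N)
        b[ty * N + tx] = tile[threadIdx.x][threadIdx.y];
}

int main() {
    const int n = N * N;
    float *h_a = new float[n], *h_b = new float[n];
    for (int i = 0; i < n; ++i) h_a[i] = (float)i;

    float *d_a, *d_b;
    cudaMalloc((void**)&d_a, n * sizeof(float));
    cudaMalloc((void**)&d_b, n * sizeof(float));
    cudaMemcpy(d_a, h_a, n * sizeof(float), cudaMemcpyHostToDevice);

    dim3 block(TILE, TILE);
    dim3 grid((N + TILE - 1) / TILE, (N + TILE - 1) / TILE);
    transposeShared<<<grid, block>>>(d_a, d_b);
    cudaMemcpy(h_b, d_b, n * sizeof(float), cudaMemcpyDeviceToHost);

    bool ok = true;
    for (int i = 0; i < N && ok; ++i)
        for (int j = 0; j < N && ok; ++j)
            ok = (h_b[j * N + i] == h_a[i * N + j]);
    printf("%s\n", ok ? "shared-memory transpose check passed" : "check FAILED");

    cudaFree(d_a); cudaFree(d_b);
    delete[] h_a; delete[] h_b;
    return 0;
}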
4c75f09198baa4a171d27712cd41f1968d62d42b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "util.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #define H(a) (-a * log2f(a)) #define H2(a1, a2, p) (H(((float)(a1) + (p)) / ((float)(a1 + a2) + 1.0f)) + \ H(((float)(a2) + (1.0f - p)) / ((float)(a1 + a2) + 1.0f))) /* Makra do sumowania tablicy 2 x 3 x 3 */ #define SUM_N3(a, n1, n2) (a[n1][n2][0] + a[n1][n2][1] + a[n1][n2][2]) #define SUM_N2(a, n1, n3) (a[n1][0][n3] + a[n1][1][n3] + a[n1][2][n3]) #define SUM_N1(a, n2, n3) (a[0][n2][n3] + a[1][n2][n3]) #define SUM_N2_N3(a, n1) (SUM_N3(a, n1, 0) + SUM_N3(a, n1, 1) + SUM_N3(a, n1, 2)) #define SUM_N1_N3(a, n2) (SUM_N3(a, 0, n2) + SUM_N3(a, 1, n2)) #define SUM_N1_N2(a, n3) (SUM_N2(a, 0, n3) + SUM_N2(a, 1, n3)) __device__ float compute_gig_1_2(int v1_p, int v2_p, char *vars, char *ds, int vars_width, int num_objects, float p) { int count[2][3][3] = { 0 }; #pragma unroll 4 for (int i = 0; i < num_objects; ++i) { char d = (ds[i / 8] >> (i % 8)) & 1; char v1 = (vars[i * vars_width + v1_p / 4] >> ((v1_p % 4) * 2)) & 3; char v2 = (vars[i * vars_width + v2_p / 4] >> ((v2_p % 4) * 2)) & 3; count[d][v1][v2]++; } float ig1, ig2, ig12, h_p; h_p = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), p); ig1 = h_p - SUM_N1_N3(count, 0) * H2(SUM_N3(count, 0, 0), SUM_N3(count, 1, 0), p) - SUM_N1_N3(count, 1) * H2(SUM_N3(count, 0, 1), SUM_N3(count, 1, 1), p) - SUM_N1_N3(count, 2) * H2(SUM_N3(count, 0, 2), SUM_N3(count, 1, 2), p); ig2 = h_p - SUM_N1_N2(count, 0) * H2(SUM_N2(count, 0, 0), SUM_N2(count, 1, 0), p) - SUM_N1_N2(count, 1) * H2(SUM_N2(count, 0, 1), SUM_N2(count, 1, 1), p) - SUM_N1_N2(count, 2) * H2(SUM_N2(count, 0, 2), SUM_N2(count, 1, 2), p); ig12 = h_p - SUM_N1(count, 0, 0) * H2(count[0][0][0], count[1][0][0], p) - SUM_N1(count, 1, 0) * H2(count[0][1][0], count[1][1][0], p) - SUM_N1(count, 2, 0) * H2(count[0][2][0], count[1][2][0], p) - SUM_N1(count, 0, 1) * H2(count[0][0][1], count[1][0][1], p) - SUM_N1(count, 1, 1) * H2(count[0][1][1], count[1][1][1], p) - SUM_N1(count, 2, 1) * H2(count[0][2][1], count[1][2][1], p) - SUM_N1(count, 0, 2) * H2(count[0][0][2], count[1][0][2], p) - SUM_N1(count, 1, 2) * H2(count[0][1][2], count[1][1][2], p) - SUM_N1(count, 2, 2) * H2(count[0][2][2], count[1][2][2], p); return ig12 - ((ig1 > ig2) ? 
ig1 : ig2); } __device__ float compute_gig_1_2_ds(int v1_p, int v2_p, char *vars1, char *vars2, char *ds, int vars1_width, int vars2_width, int num_objects, float p) { int count[2][3][3] = { 0 }; #pragma unroll 4 for (int i = 0; i < num_objects; ++i) { char d = (ds[i / 8] >> (i % 8)) & 1; char v1 = (vars1[i * vars1_width + v1_p / 4] >> ((v1_p % 4) * 2)) & 3; char v2 = (vars2[i * vars2_width + v2_p / 4] >> ((v2_p % 4) * 2)) & 3; count[d][v1][v2]++; } float ig1, ig2, ig12, h_p; h_p = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), p); ig1 = h_p - SUM_N1_N3(count, 0) * H2(SUM_N3(count, 0, 0), SUM_N3(count, 1, 0), p) - SUM_N1_N3(count, 1) * H2(SUM_N3(count, 0, 1), SUM_N3(count, 1, 1), p) - SUM_N1_N3(count, 2) * H2(SUM_N3(count, 0, 2), SUM_N3(count, 1, 2), p); ig2 = h_p - SUM_N1_N2(count, 0) * H2(SUM_N2(count, 0, 0), SUM_N2(count, 1, 0), p) - SUM_N1_N2(count, 1) * H2(SUM_N2(count, 0, 1), SUM_N2(count, 1, 1), p) - SUM_N1_N2(count, 2) * H2(SUM_N2(count, 0, 2), SUM_N2(count, 1, 2), p); ig12 = h_p - SUM_N1(count, 0, 0) * H2(count[0][0][0], count[1][0][0], p) - SUM_N1(count, 1, 0) * H2(count[0][1][0], count[1][1][0], p) - SUM_N1(count, 2, 0) * H2(count[0][2][0], count[1][2][0], p) - SUM_N1(count, 0, 1) * H2(count[0][0][1], count[1][0][1], p) - SUM_N1(count, 1, 1) * H2(count[0][1][1], count[1][1][1], p) - SUM_N1(count, 2, 1) * H2(count[0][2][1], count[1][2][1], p) - SUM_N1(count, 0, 2) * H2(count[0][0][2], count[1][0][2], p) - SUM_N1(count, 1, 2) * H2(count[0][1][2], count[1][1][2], p) - SUM_N1(count, 2, 2) * H2(count[0][2][2], count[1][2][2], p); return ig12 - ((ig1 > ig2) ? ig1 : ig2); } __global__ void compute_gig_kernel(char *vars, char *ds, int num_objects, int num_vars, float *r_gig, float p) { int v1_p = blockIdx.x * blockDim.x + threadIdx.x; int v2_p = blockIdx.y * blockDim.y + threadIdx.y; if (v1_p >= v2_p) return; if (v1_p >= num_vars) return; if (v2_p >= num_vars) return; const int num_v_padded = padToMultipleOf(num_vars, 32) / 4; r_gig[v1_p * num_vars + v2_p] = compute_gig_1_2(v1_p, v2_p, vars, ds, num_v_padded, num_objects, p); } struct GigStruct { float gig; int v1, v2; }; __global__ void compute_gig_wt_kernel(char *vars, char *ds, int num_objects, int num_vars, struct GigStruct *r_gig, int max_num_gig_structs, int* num_gig_structs, float p, float threshold) { if (blockIdx.x * blockDim.x >= (blockIdx.y + 1) * blockDim.y - 1) return; int v1_p = blockIdx.x * blockDim.x + threadIdx.x; int v2_p = blockIdx.y * blockDim.y + threadIdx.y; const int num_v_padded = padToMultipleOf(num_vars, 32) / 4; const int thread_n = blockDim.x * threadIdx.y + threadIdx.x; extern __shared__ char shared[]; const int shared_vars_width = blockDim.y / 4; const int shared_vars_size = shared_vars_width * num_objects; for (int i = thread_n; i < shared_vars_size; i += blockDim.x * blockDim.y) shared[i] = vars[(i / shared_vars_width) * num_v_padded + blockIdx.y * blockDim.y / 4 + (i % shared_vars_width)]; const int ds_size = ((num_objects - 1) / 8 + 1); for (int i = thread_n; i < ds_size; i += blockDim.x * blockDim.y) shared[shared_vars_size + i] = ds[i]; __syncthreads(); if (v1_p >= v2_p) return; if (v1_p >= num_vars) return; if (v2_p >= num_vars) return; float gig = compute_gig_1_2_ds(v1_p, threadIdx.y, vars, shared, &shared[shared_vars_size], num_v_padded, shared_vars_width, num_objects, p); if (gig < threshold) return; /* atomicInc() wraps around to 0 */ int num = atomicAdd(num_gig_structs, 1); if (num < max_num_gig_structs) { r_gig[num].gig = gig; r_gig[num].v1 = v1_p; r_gig[num].v2 = v2_p; } } /* Komparatory do 
sortowania _malejąco_ */ int compare_gig(const void *a, const void *b) { if (((struct GigStruct*)a)->gig > ((struct GigStruct*)b)->gig) return -1; else if (((struct GigStruct*)a)->gig == ((struct GigStruct*)b)->gig) return 0; else return 1; } int compare_float(const void *a, const void *b) { if (*((float*)a) > *((float*)b)) return -1; else if (*((float*)a) == *((float*)b)) return 0; else return 1; } int main() { int num_objects, num_vars, result_size, real_result_size; float a_priori, threshold; float input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all; Timer timer; timer.start(); scanf("%d %d %d %f", &num_objects, &num_vars, &result_size, &a_priori); Sync2BitArray2D vars(num_objects, padToMultipleOf(num_vars, 32)); SyncBitArray ds(num_objects); /* Czytamy dane */ { for (int i = 0; i < num_objects; ++i) { int a; scanf("%d", &a); a &= 1; ds.setHost(i, a); for (int j = 0; j < num_vars; ++j) { int b; scanf("%d", &b); b &= 3; vars.setHost(i, j, b); } } input = timer.lap(); } /* Kopiujemy dane na kartę */ { vars.syncToDevice(); ds.syncToDevice(); copy = timer.lap(); } /* Wykonujemy zrandomizowaną próbę na pierwszym 10% zmiennych */ { int random_trial_size = num_vars / 10; /* Alokacja pamięci na wynikowe GIG się nie udaje gdy pamięć jest > ok. 400MB. XXX: Tablica gig nie musiałaby być kwadratowa. */ if (random_trial_size > 8192) random_trial_size = 8192; float percent = (float)random_trial_size / (float)num_vars ; SyncArray2D<float> gig(random_trial_size, random_trial_size); dim3 block_size(32, 32); dim3 grid_size(padToMultipleOf(random_trial_size, block_size.x) / block_size.x, padToMultipleOf(random_trial_size, block_size.y) / block_size.y); hipLaunchKernelGGL(( compute_gig_kernel), dim3(grid_size), dim3(block_size), 0, 0, (char*)vars.getDevice(), (char*)ds.getDevice(), num_objects, random_trial_size, (float*)gig.getDevice(), a_priori); CUDA_CALL(hipGetLastError()); hipDeviceSynchronize(); random_trial_kernel = timer.lap(); gig.syncToHost(); random_trial_copy = timer.lap(); /* Przepisujemy obliczone GIG do spójnego kawałka pamięci, sortujemy i wybieramy odpowiedni element jako threshold */ { int num_gig = 0; float *gig_sorted = (float*)malloc(sizeof(float) * random_trial_size * random_trial_size); for (int v1_p = 0; v1_p < random_trial_size; ++v1_p) for (int v2_p = v1_p + 1; v2_p < random_trial_size; ++v2_p) gig_sorted[num_gig++] = gig.getHostEl(v1_p, v2_p); qsort(gig_sorted, num_gig, sizeof(float), compare_float); /* gig_sorted jest posortowany malejąco */ threshold = gig_sorted[(int)((float)result_size * percent * percent)]; free(gig_sorted); } random_trial_process = timer.lap(); } /* Wykonujemy docelowe obliczenia na wszystkich zmiennych kernelem, który zapisuje tylko wartości większe niż threshold */ { const int max_num_structs = result_size * 2; SyncArray<struct GigStruct> gig_structs(max_num_structs); SyncVar<int> num_structs; int y_size = 4; if (num_objects < 24000) y_size = 8; if (num_objects < 12000) y_size = 16; if (num_objects < 6000) y_size = 32; dim3 block_size(32, y_size); dim3 grid_size(padToMultipleOf(num_vars, block_size.x) / block_size.x, padToMultipleOf(num_vars, block_size.y) / block_size.y); hipLaunchKernelGGL(( compute_gig_wt_kernel), dim3(grid_size), dim3(block_size), (y_size / 4 * num_objects) + ((num_objects - 1) / 8 + 1), 0, (char*)vars.getDevice(), (char*)ds.getDevice(), num_objects, num_vars, (struct GigStruct*)gig_structs.getDevice(), max_num_structs, num_structs.getDevice(), a_priori, threshold);
CUDA_CALL(hipGetLastError()); hipDeviceSynchronize(); main_kernel = timer.lap(); num_structs.syncToHost(); gig_structs.syncToHost(); main_copy = timer.lap(); real_result_size = *num_structs.getHost(); int to_sort = real_result_size > max_num_structs ? max_num_structs : real_result_size; qsort(gig_structs.getHost(), to_sort, sizeof(struct GigStruct), compare_gig); for (int i = to_sort - 1; i >= 0; --i) printf("%f %d %d\n", gig_structs.getHostEl(i).gig, gig_structs.getHostEl(i).v1, gig_structs.getHostEl(i).v2); main_process = timer.lap(); } all = input + copy + random_trial_kernel + random_trial_copy + random_trial_process + main_kernel + main_copy + main_process; fprintf(stderr, "data: variables, objects, result_size, true result size, threshold\n"); fprintf(stderr, "%d, %d, %d, %d, %f\n", num_vars, num_objects, result_size, real_result_size, threshold); fprintf(stderr, "times: input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all\n"); fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all); fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input / all * 100.0f, copy / all * 100.0f, random_trial_kernel / all * 100.0f, random_trial_copy / all * 100.0f, random_trial_process / all * 100.0f, main_kernel / all * 100.0f, main_copy / all * 100.0f, main_process / all * 100.0f); return 0; }
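Both GIG kernels decode variables packed four to a byte: variable v of object i sits in bits (v % 4)*2 and (v % 4)*2 + 1 of byte vars[i * vars_width + v / 4], where vars_width is padToMultipleOf(num_vars, 32) / 4. The host-side packing lives in util.h, which is not part of this dump, so the sketch below uses made-up helper names just to show the same layout and its round trip.

#include <cstdio>
#include <cstring>

// Write a 2-bit value (0..3) for variable v into a row of packed bytes,
// using the layout the kernels above decode: byte v / 4, bit offset (v % 4) * 2.
void set2bit(char* row, int v, int value) {
    int byte  = v / 4;
    int shift = (v % 4) * 2;
    row[byte] = (char)((row[byte] & ~(3 << shift)) | ((value & 3) << shift));
}

// Read it back with the same expression the kernels use.
int get2bit(const char* row, int v) {
    return (row[v / 4] >> ((v % 4) * 2)) & 3;
}

int main() {
    char row[8];                 // room for 32 two-bit variables
    memset(row, 0, sizeof(row));
    set2bit(row, 0, 2);
    set2bit(row, 5, 3);
    set2bit(row, 31, 1);
    printf("%d %d %d\n", get2bit(row, 0), get2bit(row, 5), get2bit(row, 31));  // expects: 2 3 1
    return 0;
}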
4c75f09198baa4a171d27712cd41f1968d62d42b.cu
#include "util.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #define H(a) (-a * log2f(a)) #define H2(a1, a2, p) (H(((float)(a1) + (p)) / ((float)(a1 + a2) + 1.0f)) + \ H(((float)(a2) + (1.0f - p)) / ((float)(a1 + a2) + 1.0f))) /* Makra do sumowania tablicy 2 x 3 x 3 */ #define SUM_N3(a, n1, n2) (a[n1][n2][0] + a[n1][n2][1] + a[n1][n2][2]) #define SUM_N2(a, n1, n3) (a[n1][0][n3] + a[n1][1][n3] + a[n1][2][n3]) #define SUM_N1(a, n2, n3) (a[0][n2][n3] + a[1][n2][n3]) #define SUM_N2_N3(a, n1) (SUM_N3(a, n1, 0) + SUM_N3(a, n1, 1) + SUM_N3(a, n1, 2)) #define SUM_N1_N3(a, n2) (SUM_N3(a, 0, n2) + SUM_N3(a, 1, n2)) #define SUM_N1_N2(a, n3) (SUM_N2(a, 0, n3) + SUM_N2(a, 1, n3)) __device__ float compute_gig_1_2(int v1_p, int v2_p, char *vars, char *ds, int vars_width, int num_objects, float p) { int count[2][3][3] = { 0 }; #pragma unroll 4 for (int i = 0; i < num_objects; ++i) { char d = (ds[i / 8] >> (i % 8)) & 1; char v1 = (vars[i * vars_width + v1_p / 4] >> ((v1_p % 4) * 2)) & 3; char v2 = (vars[i * vars_width + v2_p / 4] >> ((v2_p % 4) * 2)) & 3; count[d][v1][v2]++; } float ig1, ig2, ig12, h_p; h_p = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), p); ig1 = h_p - SUM_N1_N3(count, 0) * H2(SUM_N3(count, 0, 0), SUM_N3(count, 1, 0), p) - SUM_N1_N3(count, 1) * H2(SUM_N3(count, 0, 1), SUM_N3(count, 1, 1), p) - SUM_N1_N3(count, 2) * H2(SUM_N3(count, 0, 2), SUM_N3(count, 1, 2), p); ig2 = h_p - SUM_N1_N2(count, 0) * H2(SUM_N2(count, 0, 0), SUM_N2(count, 1, 0), p) - SUM_N1_N2(count, 1) * H2(SUM_N2(count, 0, 1), SUM_N2(count, 1, 1), p) - SUM_N1_N2(count, 2) * H2(SUM_N2(count, 0, 2), SUM_N2(count, 1, 2), p); ig12 = h_p - SUM_N1(count, 0, 0) * H2(count[0][0][0], count[1][0][0], p) - SUM_N1(count, 1, 0) * H2(count[0][1][0], count[1][1][0], p) - SUM_N1(count, 2, 0) * H2(count[0][2][0], count[1][2][0], p) - SUM_N1(count, 0, 1) * H2(count[0][0][1], count[1][0][1], p) - SUM_N1(count, 1, 1) * H2(count[0][1][1], count[1][1][1], p) - SUM_N1(count, 2, 1) * H2(count[0][2][1], count[1][2][1], p) - SUM_N1(count, 0, 2) * H2(count[0][0][2], count[1][0][2], p) - SUM_N1(count, 1, 2) * H2(count[0][1][2], count[1][1][2], p) - SUM_N1(count, 2, 2) * H2(count[0][2][2], count[1][2][2], p); return ig12 - ((ig1 > ig2) ? 
ig1 : ig2); } __device__ float compute_gig_1_2_ds(int v1_p, int v2_p, char *vars1, char *vars2, char *ds, int vars1_width, int vars2_width, int num_objects, float p) { int count[2][3][3] = { 0 }; #pragma unroll 4 for (int i = 0; i < num_objects; ++i) { char d = (ds[i / 8] >> (i % 8)) & 1; char v1 = (vars1[i * vars1_width + v1_p / 4] >> ((v1_p % 4) * 2)) & 3; char v2 = (vars2[i * vars2_width + v2_p / 4] >> ((v2_p % 4) * 2)) & 3; count[d][v1][v2]++; } float ig1, ig2, ig12, h_p; h_p = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), p); ig1 = h_p - SUM_N1_N3(count, 0) * H2(SUM_N3(count, 0, 0), SUM_N3(count, 1, 0), p) - SUM_N1_N3(count, 1) * H2(SUM_N3(count, 0, 1), SUM_N3(count, 1, 1), p) - SUM_N1_N3(count, 2) * H2(SUM_N3(count, 0, 2), SUM_N3(count, 1, 2), p); ig2 = h_p - SUM_N1_N2(count, 0) * H2(SUM_N2(count, 0, 0), SUM_N2(count, 1, 0), p) - SUM_N1_N2(count, 1) * H2(SUM_N2(count, 0, 1), SUM_N2(count, 1, 1), p) - SUM_N1_N2(count, 2) * H2(SUM_N2(count, 0, 2), SUM_N2(count, 1, 2), p); ig12 = h_p - SUM_N1(count, 0, 0) * H2(count[0][0][0], count[1][0][0], p) - SUM_N1(count, 1, 0) * H2(count[0][1][0], count[1][1][0], p) - SUM_N1(count, 2, 0) * H2(count[0][2][0], count[1][2][0], p) - SUM_N1(count, 0, 1) * H2(count[0][0][1], count[1][0][1], p) - SUM_N1(count, 1, 1) * H2(count[0][1][1], count[1][1][1], p) - SUM_N1(count, 2, 1) * H2(count[0][2][1], count[1][2][1], p) - SUM_N1(count, 0, 2) * H2(count[0][0][2], count[1][0][2], p) - SUM_N1(count, 1, 2) * H2(count[0][1][2], count[1][1][2], p) - SUM_N1(count, 2, 2) * H2(count[0][2][2], count[1][2][2], p); return ig12 - ((ig1 > ig2) ? ig1 : ig2); } __global__ void compute_gig_kernel(char *vars, char *ds, int num_objects, int num_vars, float *r_gig, float p) { int v1_p = blockIdx.x * blockDim.x + threadIdx.x; int v2_p = blockIdx.y * blockDim.y + threadIdx.y; if (v1_p >= v2_p) return; if (v1_p >= num_vars) return; if (v2_p >= num_vars) return; const int num_v_padded = padToMultipleOf(num_vars, 32) / 4; r_gig[v1_p * num_vars + v2_p] = compute_gig_1_2(v1_p, v2_p, vars, ds, num_v_padded, num_objects, p); } struct GigStruct { float gig; int v1, v2; }; __global__ void compute_gig_wt_kernel(char *vars, char *ds, int num_objects, int num_vars, struct GigStruct *r_gig, int max_num_gig_structs, int* num_gig_structs, float p, float threshold) { if (blockIdx.x * blockDim.x >= (blockIdx.y + 1) * blockDim.y - 1) return; int v1_p = blockIdx.x * blockDim.x + threadIdx.x; int v2_p = blockIdx.y * blockDim.y + threadIdx.y; const int num_v_padded = padToMultipleOf(num_vars, 32) / 4; const int thread_n = blockDim.x * threadIdx.y + threadIdx.x; extern __shared__ char shared[]; const int shared_vars_width = blockDim.y / 4; const int shared_vars_size = shared_vars_width * num_objects; for (int i = thread_n; i < shared_vars_size; i += blockDim.x * blockDim.y) shared[i] = vars[(i / shared_vars_width) * num_v_padded + blockIdx.y * blockDim.y / 4 + (i % shared_vars_width)]; const int ds_size = ((num_objects - 1) / 8 + 1); for (int i = thread_n; i < ds_size; i += blockDim.x * blockDim.y) shared[shared_vars_size + i] = ds[i]; __syncthreads(); if (v1_p >= v2_p) return; if (v1_p >= num_vars) return; if (v2_p >= num_vars) return; float gig = compute_gig_1_2_ds(v1_p, threadIdx.y, vars, shared, &shared[shared_vars_size], num_v_padded, shared_vars_width, num_objects, p); if (gig < threshold) return; /* atomicInc() wraps around to 0 */ int num = atomicAdd(num_gig_structs, 1); if (num < max_num_gig_structs) { r_gig[num].gig = gig; r_gig[num].v1 = v1_p; r_gig[num].v2 = v2_p; } } /* Komparatory do 
sortowania _malejąco_ */ int compare_gig(const void *a, const void *b) { if (((struct GigStruct*)a)->gig > ((struct GigStruct*)b)->gig) return -1; else if (((struct GigStruct*)a)->gig == ((struct GigStruct*)b)->gig) return 0; else return 1; } int compare_float(const void *a, const void *b) { if (*((float*)a) > *((float*)b)) return -1; else if (*((float*)a) == *((float*)b)) return 0; else return 1; } int main() { int num_objects, num_vars, result_size, real_result_size; float a_priori, threshold; float input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all; Timer timer; timer.start(); scanf("%d %d %d %f", &num_objects, &num_vars, &result_size, &a_priori); Sync2BitArray2D vars(num_objects, padToMultipleOf(num_vars, 32)); SyncBitArray ds(num_objects); /* Czytamy dane */ { for (int i = 0; i < num_objects; ++i) { int a; scanf("%d", &a); a &= 1; ds.setHost(i, a); for (int j = 0; j < num_vars; ++j) { int b; scanf("%d", &b); b &= 3; vars.setHost(i, j, b); } } input = timer.lap(); } /* Kopiujemy dane na kartę */ { vars.syncToDevice(); ds.syncToDevice(); copy = timer.lap(); } /* Wykonujemy zrandomizowaną próbę na pierwszym 10% zmiennych */ { int random_trial_size = num_vars / 10; /* Alokacja pamięci na wynikowe GIG się nie udaje gdy pamięć jest > ok. 400MB. XXX: Tablica gig nie musiałaby być kwadratowa. */ if (random_trial_size > 8192) random_trial_size = 8192; float percent = (float)random_trial_size / (float)num_vars ; SyncArray2D<float> gig(random_trial_size, random_trial_size); dim3 block_size(32, 32); dim3 grid_size(padToMultipleOf(random_trial_size, block_size.x) / block_size.x, padToMultipleOf(random_trial_size, block_size.y) / block_size.y); compute_gig_kernel<<<grid_size, block_size>>>((char*)vars.getDevice(), (char*)ds.getDevice(), num_objects, random_trial_size, (float*)gig.getDevice(), a_priori); CUDA_CALL(cudaGetLastError()); cudaDeviceSynchronize(); random_trial_kernel = timer.lap(); gig.syncToHost(); random_trial_copy = timer.lap(); /* Przepisujemy obliczone GIG do spójnego kawałka pamięci, sortujemy i wybieramy odpowiedni element jako threshold */ { int num_gig = 0; float *gig_sorted = (float*)malloc(sizeof(float) * random_trial_size * random_trial_size); for (int v1_p = 0; v1_p < random_trial_size; ++v1_p) for (int v2_p = v1_p + 1; v2_p < random_trial_size; ++v2_p) gig_sorted[num_gig++] = gig.getHostEl(v1_p, v2_p); qsort(gig_sorted, num_gig, sizeof(float), compare_float); /* gig_sorted jest posortowany malejąco */ threshold = gig_sorted[(int)((float)result_size * percent * percent)]; free(gig_sorted); } random_trial_process = timer.lap(); } /* Wykonujemy docelowe obliczenia na wszystkich zmiennych kernelem, który zapisuje tylko wartości większe niż threshold */ { const int max_num_structs = result_size * 2; SyncArray<struct GigStruct> gig_structs(max_num_structs); SyncVar<int> num_structs; int y_size = 4; if (num_objects < 24000) y_size = 8; if (num_objects < 12000) y_size = 16; if (num_objects < 6000) y_size = 32; dim3 block_size(32, y_size); dim3 grid_size(padToMultipleOf(num_vars, block_size.x) / block_size.x, padToMultipleOf(num_vars, block_size.y) / block_size.y); compute_gig_wt_kernel<<<grid_size, block_size, (y_size / 4 * num_objects) + ((num_objects - 1) / 8 + 1)>>>((char*)vars.getDevice(), (char*)ds.getDevice(), num_objects, num_vars, (struct GigStruct*)gig_structs.getDevice(), max_num_structs, num_structs.getDevice(), a_priori, threshold); CUDA_CALL(cudaGetLastError()); cudaDeviceSynchronize(); main_kernel = 
timer.lap(); num_structs.syncToHost(); gig_structs.syncToHost(); main_copy = timer.lap(); real_result_size = *num_structs.getHost(); int to_sort = real_result_size > max_num_structs ? max_num_structs : real_result_size; qsort(gig_structs.getHost(), to_sort, sizeof(struct GigStruct), compare_gig); for (int i = to_sort - 1; i >= 0; --i) printf("%f %d %d\n", gig_structs.getHostEl(i).gig, gig_structs.getHostEl(i).v1, gig_structs.getHostEl(i).v2); main_process = timer.lap(); } all = input + copy + random_trial_kernel + random_trial_copy + random_trial_process + main_kernel + main_copy + main_process; fprintf(stderr, "data: variables, objects, result_size, true result size, threshold\n"); fprintf(stderr, "%d, %d, %d, %d, %f\n", num_vars, num_objects, result_size, real_result_size, threshold); fprintf(stderr, "times: input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all\n"); fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all); fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input / all * 100.0f, copy / all * 100.0f, random_trial_kernel / all * 100.0f, random_trial_copy / all * 100.0f, random_trial_process / all * 100.0f, main_kernel / all * 100.0f, main_copy / all * 100.0f, main_process / all * 100.0f); return 0; }
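/*
 * Illustrative sketch: the GIG kernels above read ternary variables from a
 * 2-bit packed layout (four variables per byte, one row per object) and the
 * decision class from a 1-bit packed array (eight objects per byte). The
 * host-side helpers below merely restate that decoding; the names unpack_var
 * and unpack_class are illustrative and do not appear in the original sources.
 */
static inline int unpack_var(const char *vars, int vars_width, int object, int var)
{
    /* vars_width is the row stride in bytes; 2 bits per variable, 4 variables per byte */
    return (vars[object * vars_width + var / 4] >> ((var % 4) * 2)) & 3;
}

static inline int unpack_class(const char *ds, int object)
{
    /* one decision bit per object, packed 8 objects per byte */
    return (ds[object / 8] >> (object % 8)) & 1;
}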
6741e682c9b1bbb4e29a0abf58d90c690bf1fce6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hiprand/hiprand.h> #include "../COMMON/commons.cuh" #include <thrust/device_ptr.h> #include <thrust/reduce.h> // Generates a random vector from a Poisson distribution with parameter lambda using the host-side API. // Then computes the mean (should be close to lambda). #define N 100000 int main(int argc, char **argv) { unsigned int lambda = 3; hiprandGenerator_t gen; unsigned int *h_v = new unsigned int[N]; unsigned int *d_v; // Allocate device memory CUDA_CHECK(hipMalloc(&d_v, N * sizeof(unsigned int))); // Create a hiprand generator (several generators exist, let's just take the default) CURAND_CHECK(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT)); // Set a seed CURAND_CHECK(hiprandSetPseudoRandomGeneratorSeed(gen, 1)); // Generate the random numbers on the device vector printf("Generating poisson distribution with parameter : %d\n", lambda); CURAND_CHECK(hiprandGeneratePoisson(gen, d_v, N, lambda)); // Compute and print the mean thrust::device_ptr<unsigned int> t_v = thrust::device_pointer_cast(d_v); float mean = (float) thrust::reduce(t_v, t_v + N); mean /= N; printf("Mean : %g\n", mean); // Optionally copy the vector back to the CPU CUDA_CHECK(hipMemcpy(h_v, d_v, N * sizeof(unsigned int), hipMemcpyDeviceToHost)); // Release the hiprand resource CURAND_CHECK(hiprandDestroyGenerator(gen)); // Free the device and host buffers CUDA_CHECK(hipFree(d_v)); delete[] h_v; return 0; }
6741e682c9b1bbb4e29a0abf58d90c690bf1fce6.cu
#include <stdio.h> #include <curand.h> #include "../COMMON/commons.cuh" #include <thrust/device_ptr.h> #include <thrust/reduce.h> // Generates a random vector from a Poisson distribution with parameter lambda using the host-side API. // Then computes the mean (should be close to lambda). #define N 100000 int main(int argc, char **argv) { unsigned int lambda = 3; curandGenerator_t gen; unsigned int *h_v = new unsigned int[N]; unsigned int *d_v; // Allocate device memory CUDA_CHECK(cudaMalloc(&d_v, N * sizeof(unsigned int))); // Create a curand generator (several generators exist, let's just take the default) CURAND_CHECK(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT)); // Set a seed CURAND_CHECK(curandSetPseudoRandomGeneratorSeed(gen, 1)); // Generate the random numbers on the device vector printf("Generating poisson distribution with parameter : %d\n", lambda); CURAND_CHECK(curandGeneratePoisson(gen, d_v, N, lambda)); // Compute and print the mean thrust::device_ptr<unsigned int> t_v = thrust::device_pointer_cast(d_v); float mean = (float) thrust::reduce(t_v, t_v + N); mean /= N; printf("Mean : %g\n", mean); // Optionally copy the vector back to the CPU CUDA_CHECK(cudaMemcpy(h_v, d_v, N * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // Release the curand resource CURAND_CHECK(curandDestroyGenerator(gen)); // Free the device and host buffers CUDA_CHECK(cudaFree(d_v)); delete[] h_v; return 0; }
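/*
 * Illustrative sketch: CUDA_CHECK and CURAND_CHECK come from
 * "../COMMON/commons.cuh", which is not shown in this dump. The stand-ins
 * below are an assumption about what such macros typically do (check the
 * returned status and abort with a message); the real header may differ.
 */
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <curand.h>

#define CUDA_CHECK_SKETCH(call) \
    do { \
        cudaError_t err_ = (call); \
        if (err_ != cudaSuccess) { \
            std::fprintf(stderr, "CUDA error: %s (%s:%d)\n", cudaGetErrorString(err_), __FILE__, __LINE__); \
            std::exit(EXIT_FAILURE); \
        } \
    } while (0)

#define CURAND_CHECK_SKETCH(call) \
    do { \
        curandStatus_t st_ = (call); \
        if (st_ != CURAND_STATUS_SUCCESS) { \
            std::fprintf(stderr, "cuRAND error %d (%s:%d)\n", (int)st_, __FILE__, __LINE__); \
            std::exit(EXIT_FAILURE); \
        } \
    } while (0)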
20d3b47e473157ed646992c413efc7438c8581ab.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include "solve_system.h" #define EPSILON 1e-2 #define ITERS 50 __global__ void jacobiMethod(float* coal, float* x, float* b, int col, int num); /** * Solves the system pointed to by _device_ pointers A_d and b_d, and non- * device pointer x. Col must be the number of elements in x and b_d, and * col^2 is the number of elements in A_d. */ float* solveSystem(float* A_d, float* b_d, int splines, int col) { float *x_d, *x = (float *) calloc(splines * col, sizeof(float)); int numA = col * col; hipError_t error; // Malloc space on the card. hipMalloc((void**)&x_d, splines * col * sizeof(float)); checkCudaError("solve_system.cu: error mallocing on device:"); // Copy the initial guess x onto the card. hipMemcpy(x_d, x, splines * col * sizeof(float), hipMemcpyHostToDevice); checkCudaError("solve_system.cu: error mem copying to device:"); // Run all iters of the jacobi method. for (int i = 0; i <= splines / MAX_GRID_SIZE; i++) { int numBlocks = i * MAX_GRID_SIZE; int blocks = (i == splines / MAX_GRID_SIZE) ? splines % MAX_GRID_SIZE : MAX_GRID_SIZE; hipLaunchKernelGGL(( jacobiMethod), dim3(blocks), dim3(col), 0, 0, &A_d[numBlocks * numA], &x_d[numBlocks * col], &b_d[numBlocks * col], col, numA); checkCudaError("solve_system.cu: error calling CUDA kernel:"); } free(x); return x_d; } __shared__ float psum[MAT_SIZE]; /** * Use the Jacobi Method (on wikipedia and others) to solve a linear system * of equations. This CUDA call executes one iteration of the method. * Multiple calls are expected to be made to find a suitable answer. Host * must copy x back and manually check whether convergence was reached. */ __global__ void jacobiMethod(float* coal, float* x, float* b, int col, int num) { int i; int idx = blockIdx.x * blockDim.x + threadIdx.x, d = 0; int elem = threadIdx.x * col; /*if (idx % blockDim.x < col) prev[elem] = 0;*/ for (i = 0; i < ITERS; i++) { //do { // Set diagonal elements to zero; otherwise multiply by corresponding x val. for (d = 0; d < col; d++) if ((elem + d) < num && (elem + d) % (col + 1) != 0) psum[elem + d] = coal[elem + d] * x[blockIdx.x * blockDim.x + d]; else psum[elem + d] = 0.0; __syncthreads(); // Add up all elements in each line of the psum (shared mem) matrix for (d = 1; d < col; d++) psum[elem] += psum[(elem) + d]; __syncthreads(); // Compute the next guess for x using the psum array. if (threadIdx.x < col) x[idx] = (b[idx] - psum[elem]) / coal[threadIdx.x * (col + 1)]; } }
20d3b47e473157ed646992c413efc7438c8581ab.cu
#include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include "solve_system.h" #define EPSILON 1e-2 #define ITERS 50 __global__ void jacobiMethod(float* coal, float* x, float* b, int col, int num); /** * Solves the system pointed to by _device_ pointers A_d and b_d, and non- * device pointer x. Col must be the number of elements in x and b_d, and * col^2 is the number of elements in A_d. */ float* solveSystem(float* A_d, float* b_d, int splines, int col) { float *x_d, *x = (float *) calloc(splines * col, sizeof(float)); int numA = col * col; cudaError_t error; // Malloc space on the card. cudaMalloc((void**)&x_d, splines * col * sizeof(float)); checkCudaError("solve_system.cu: error mallocing on device:"); // Copy the initial guess x onto the card. cudaMemcpy(x_d, x, splines * col * sizeof(float), cudaMemcpyHostToDevice); checkCudaError("solve_system.cu: error mem copying to device:"); // Run all iters of the jacobi method. for (int i = 0; i <= splines / MAX_GRID_SIZE; i++) { int numBlocks = i * MAX_GRID_SIZE; int blocks = (i == splines / MAX_GRID_SIZE) ? splines % MAX_GRID_SIZE : MAX_GRID_SIZE; jacobiMethod<<<blocks, col>>>(&A_d[numBlocks * numA], &x_d[numBlocks * col], &b_d[numBlocks * col], col, numA); checkCudaError("solve_system.cu: error calling CUDA kernel:"); } free(x); return x_d; } __shared__ float psum[MAT_SIZE]; /** * Use the Jacobi Method (on wikipedia and others) to solve a linear system * of equations. This CUDA call executes one iteration of the method. * Multiple calls are expected to be made to find a suitable answer. Host * must copy x back and manually check whether convergence was reached. */ __global__ void jacobiMethod(float* coal, float* x, float* b, int col, int num) { int i; int idx = blockIdx.x * blockDim.x + threadIdx.x, d = 0; int elem = threadIdx.x * col; /*if (idx % blockDim.x < col) prev[elem] = 0;*/ for (i = 0; i < ITERS; i++) { //do { // Set diagonal elements to zero; otherwise multiply by corresponding x val. for (d = 0; d < col; d++) if ((elem + d) < num && (elem + d) % (col + 1) != 0) psum[elem + d] = coal[elem + d] * x[blockIdx.x * blockDim.x + d]; else psum[elem + d] = 0.0; __syncthreads(); // Add up all elements in each line of the psum (shared mem) matrix for (d = 1; d < col; d++) psum[elem] += psum[(elem) + d]; __syncthreads(); // Compute the next guess for x using the psum array. if (threadIdx.x < col) x[idx] = (b[idx] - psum[elem]) / coal[threadIdx.x * (col + 1)]; } }
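/*
 * Illustrative sketch: the kernel above parallelises the classic Jacobi
 * update x_i <- (b_i - sum_{j != i} A[i][j] * x_j) / A[i][i]. The plain
 * host-side version below shows the same update for a single n x n system;
 * the name jacobi_host is illustrative and not part of the original file.
 */
#include <stdlib.h>
#include <string.h>

static void jacobi_host(const float *A, const float *b, float *x, int n, int iters)
{
    float *xn = (float *) malloc(n * sizeof(float));
    for (int k = 0; k < iters; k++) {
        for (int i = 0; i < n; i++) {
            float sum = 0.0f;
            for (int j = 0; j < n; j++)
                if (j != i)
                    sum += A[i * n + j] * x[j];  /* off-diagonal contributions */
            xn[i] = (b[i] - sum) / A[i * n + i]; /* divide by the diagonal entry */
        }
        memcpy(x, xn, n * sizeof(float));        /* commit the new guess */
    }
    free(xn);
}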
02eafc598281b5b4c3405a0e4ebbeed106e69a12.hip
// !!! This is a file automatically generated by hipify!!! //============================================================================= // FILE: util.cu // // DESC: This file implements the utility functions. //============================================================================= #include "util.h" //============================================================================= // Constants //============================================================================= //============================================================================= // Public Methods //============================================================================= inline double cint(double x) { double dfInt = 0; if (modf(x, &dfInt) >= 0.5) return (x >= 0) ? ceil(x) : floor(x); else return (x < 0) ? ceil(x) : floor(x); } inline double round2(double r, int places) { double off = pow(10.0, places); return cint(r*off)/off; } inline double roundex(double r) { char sz[256]; sprintf(sz, "%0.7lf", r); return atof(sz); } bool GetErrorString(long lKernel, long lErr, char* szErr, long lMaxErr) { if (GetCudaErrorString(lKernel, lErr, szErr, lMaxErr)) return true; switch (lErr) { case ERROR_DLL_NOT_INIT: _snprintf(szErr, lMaxErr, "DLL: The DLL is not initialized (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_DEVICE_NOT_INITIALIZED: _snprintf(szErr, lMaxErr, "DEVICE: The device is not initialized error (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_PARAM: _snprintf(szErr, lMaxErr, "GENERAL: Parameter error (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_PARAM_OUT_OF_RANGE: _snprintf(szErr, lMaxErr, "GENERAL: Parameter out of range (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_PARAM_NULL: _snprintf(szErr, lMaxErr, "GENERAL: Parameter is NULL (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_FILE_WRITE: _snprintf(szErr, lMaxErr, "GENERAL: Failure when writing to file (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_FILE_READ: _snprintf(szErr, lMaxErr, "GENERAL: Failure when reading from file (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_FILE_OPEN: _snprintf(szErr, lMaxErr, "GENERAL: Failure when opening a file (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MATRIX: _snprintf(szErr, lMaxErr, "MATRIX: general matrix error (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MEMORY_EXPECTED_DEVICE: _snprintf(szErr, lMaxErr, "MEMORY: Expected device memory but received host memory (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MEMORY_RANGE_EXCEEDED: _snprintf(szErr, lMaxErr, "MEMORY: Exceeded the maximum amount of memory size available as a chunk (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MEMORY_MIXED_HALF_TYPES: _snprintf(szErr, lMaxErr, "MEMORY: You are using a mix of half types and non-half types. All types for this function must be of the same type (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MEMORY_HALF_TYPE_NOT_SUPPORTED: _snprintf(szErr, lMaxErr, "MEMORY: The GPU that you are using has limited half-type support. Full half-type support is only available on Maxwell gpu's with compute 5.3 and above (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MEMORY_OUT: _snprintf(szErr, lMaxErr, "MEMORY: Out of memory (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MEMORY_NOT_FOUND: _snprintf(szErr, lMaxErr, "MEMORY: Memory was not found and therefore could not be freed. 
(%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_BATCH_TOO_SMALL: _snprintf(szErr, lMaxErr, "DATA: The batch size used is too small - not enough label variety for sequencing. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MEMORY_TOO_SMALL: _snprintf(szErr, lMaxErr, "MEMORY: Memory size allocated is too small - must allocate more memory for this operation. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MATRIX_DIMENSIONS_DONT_MATCH: _snprintf(szErr, lMaxErr, "MATRIX: matrix dimensions do not match (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MATRIX_DIMENSIONS_EXCEED_THREADS: _snprintf(szErr, lMaxErr, "MATRIX: matrix dimensions exceed number of threads (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MATRIX_NOT_SQUARE: _snprintf(szErr, lMaxErr, "MATRIX: the current operation is only supported on square matrices (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_VECTOR: _snprintf(szErr, lMaxErr, "VECTOR: general vector error (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_VECTOR_DIMENSIONS_DONT_MATCH: _snprintf(szErr, lMaxErr, "VECTOR: vector dimensions do not match (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_NN: _snprintf(szErr, lMaxErr, "NN: general neural net error (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_NN_LAYER_COUNTS_DONT_MATCH: _snprintf(szErr, lMaxErr, "NN: layer counts do not match (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_CUBLAS_NULL: _snprintf(szErr, lMaxErr, "NN: The cublas handle is NULL! (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_CUDA_NOTSUPPORED_ON_DISPLAYGPU: _snprintf(szErr, lMaxErr, "CUDA: The function you are attempting to run is not supported on the display GPU (only supported on headless gpus)! (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_CUDA_MISSING_NCCL64DLL: _snprintf(szErr, lMaxErr, "CUDA: The 'nccl64' DLL is missing from the executable directory! For example when using the version 134 for CUDA 10.0, the file 'nccl64_134.10.0.dll' should be in the same directory as the executable. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_CUDA_KERNEL_NOT_IMPLEMENTED: _snprintf(szErr, lMaxErr, "CUDA: The kernel or specific function specified is not implemented yet! (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_TSNE: _snprintf(szErr, lMaxErr, "TSNE: A general TSN-E error occurred. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_TSNE_NO_DISTANCES_FOUND: _snprintf(szErr, lMaxErr, "TSNE: No differences found between the images - they may all be the same. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD: _snprintf(szErr, lMaxErr, "SSD: A general SSD error occurred. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_NOT_INITIALIZED: _snprintf(szErr, lMaxErr, "SSD: The SSD is not initialized. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_CODE_TYPE: _snprintf(szErr, lMaxErr, "SSD: The SSD code type specified is invalid. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_BBOX_DIMENSION: _snprintf(szErr, lMaxErr, "SSD: The SSD bbox dimension (width or height) is invalid (e.g. < 0). (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_HOST_TYPE_NOT_SUPPORTED: _snprintf(szErr, lMaxErr, "SSD: The HOST type specified is not supported for this function. 
(%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_BAD_MATCH: _snprintf(szErr, lMaxErr, "SSD: The current matching is bad, expected a match index of -1. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_GT_LABEL_OUT_OF_RANGE: _snprintf(szErr, lMaxErr, "SSD: The ground truth label is out of range. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_PRIOR_VARIANCE_COUNT: _snprintf(szErr, lMaxErr, "SSD: The prior variances count does not match the prior bbox count. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_BACKGROUND_LABEL_OUT_OF_RANGE: _snprintf(szErr, lMaxErr, "SSD: The background label id is out of range. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_COMPUTE_CONF_LOSS_MATCH_INDEX_INCORRECT: _snprintf(szErr, lMaxErr, "SSD: The match_index should equal the number of priors in the compute conf loss calculation. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_COMPUTE_CONF_LOSS_GT_MISSING_ITEM: _snprintf(szErr, lMaxErr, "SSD: The ground-truths are missing an expected itemId in the compute conf loss calculation. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_COMPUTE_CONF_LOSS_MATCH_INDEX_OUT_OF_RANGE: _snprintf(szErr, lMaxErr, "SSD: The match index is out of range of the ground-truths in the compute conf loss calculation. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_COMPUTE_CONF_LOSS_INVALID_LABEL: _snprintf(szErr, lMaxErr, "SSD: The label in the compute conf loss calculation is invalid. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_NOT_SUPPORTED_IN_HALF_BBOX: _snprintf(szErr, lMaxErr, "SSD: The requested query is not supported by the half Bbox - only full BBox's support this type of query. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_LOC_PRED_LABEL_NOT_FOUND: _snprintf(szErr, lMaxErr, "SSD: Could not find an expected label in the loc predictions. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_SAMPLE_SIZE_TOO_SMALL: _snprintf(szErr, lMaxErr, "SSD: The sample size is too small and must be > 0. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_NUM_CLASSES: _snprintf(szErr, lMaxErr, "SSD: The number of classes is incorrect (e.g. when using map to agnostic, only 2 classes are valid for backgroundLabel >= 0, otherwise only 1 class is valid). (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_CONF_LOSS_TYPE: _snprintf(szErr, lMaxErr, "SSD: The conf loss type is unknown and invalid. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_BACKGROUND_LABEL_IN_DATASET: _snprintf(szErr, lMaxErr, "SSD: The ground truth was found in the dataset. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_NUMLOCCLASSES_FOR_SHARED: _snprintf(szErr, lMaxErr, "SSD: The number of loc classes must be 1 when using shared location. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_LOCCOUNT_GTCOUNT: _snprintf(szErr, lMaxErr, "SSD: The loc pred and loc gt must be equal. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_LOC_LOSS_MATCH_COUNT: _snprintf(szErr, lMaxErr, "SSD: The loc loss match count is incorrect. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_LOC_LOSS_TYPE: _snprintf(szErr, lMaxErr, "SSD: The loc loss type is invalid. 
(%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_MINEHARDEXAMPLES_NO_MATCHES: _snprintf(szErr, lMaxErr, "SSD: No matches were found to mine hard examples. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_LAYERNORM: _snprintf(szErr, lMaxErr, "LAYERNORM: A general LayerNorm error occurred. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_LAYERNORM_NOT_INITIALIZED: _snprintf(szErr, lMaxErr, "LAYERNORM: The LayerNorm is not initialized. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_RNN8: _snprintf(szErr, lMaxErr, "RNN8: A general Rnn8 error occurred. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_RNN8_NOT_INITIALIZED: _snprintf(szErr, lMaxErr, "RNN8: The RNN8 is not initialized. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_RNN8_INCOMPATIBLE_CUDNN_VER: _snprintf(szErr, lMaxErr, "RNN8: The RNN8 requires cuDNN version 8.0+ to run. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_RNN8_INVALID_FILLER: _snprintf(szErr, lMaxErr, "RNN8: The RNN8 does not support the filler specified. (%ld), Kernel = %ld", lErr, lKernel); return true; } return false; } bool GetCudaErrorString(long lKernel, long lErr, char* szErr, long lMaxErr) { if (lErr == 0) return false; if ((lErr & ERROR_CUBLAS_OFFSET) == ERROR_CUBLAS_OFFSET) { lErr &= (~ERROR_CUBLAS_OFFSET); switch (lErr) { case HIPBLAS_STATUS_NOT_INITIALIZED: _snprintf(szErr, lMaxErr, "cuBlas: The cuBlas library was not initialized propertly (%ld), Kernel = %ld", lErr, lKernel); return true; case HIPBLAS_STATUS_ALLOC_FAILED: _snprintf(szErr, lMaxErr, "cuBlas: A resource allocation failed within the cuBlas library (%ld), Kernel = %ld", lErr, lKernel); return true; case HIPBLAS_STATUS_INVALID_VALUE: _snprintf(szErr, lMaxErr, "cuBlas: An invalid parameter was passed to the function. (%ld), Kernel = %ld", lErr, lKernel); return true; case HIPBLAS_STATUS_ARCH_MISMATCH: _snprintf(szErr, lMaxErr, "cuBlas: The function requires functionality not supported by the current device architecture. (%ld), Kernel = %ld", lErr, lKernel); return true; case HIPBLAS_STATUS_MAPPING_ERROR: _snprintf(szErr, lMaxErr, "cuBlas: Access to the GPU memory failed possibly caused by a failure to bind to a texture. (%ld), Kernel = %ld", lErr, lKernel); return true; case HIPBLAS_STATUS_EXECUTION_FAILED: _snprintf(szErr, lMaxErr, "cuBlas: A cuBlas GPU kernel failed to execute. (%ld), Kernel = %ld", lErr, lKernel); return true; case HIPBLAS_STATUS_INTERNAL_ERROR: _snprintf(szErr, lMaxErr, "cuBlas: A failure occurred within cuBlas. (%ld), Kernel = %ld", lErr, lKernel); return true; case HIPBLAS_STATUS_NOT_SUPPORTED: _snprintf(szErr, lMaxErr, "cuBlas: The function called is not supported. (%ld), Kernel = %ld", lErr, lKernel); return true; case CUBLAS_STATUS_LICENSE_ERROR: _snprintf(szErr, lMaxErr, "cuBlas: The functionality requested requires a license that is missing. 
(%ld), Kernel = %ld", lErr, lKernel); return true; } } else if ((lErr & ERROR_CUDNN_OFFSET) == ERROR_CUDNN_OFFSET) { lErr &= (~ERROR_CUDNN_OFFSET); switch (lErr) { case CUDNN_STATUS_NOT_INITIALIZED: _snprintf(szErr, lMaxErr, "cuDNN: The cuDNN library was not initialized propertly (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_ALLOC_FAILED: _snprintf(szErr, lMaxErr, "cuDNN: A resource allocation failed within the cuDNN library (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_BAD_PARAM: _snprintf(szErr, lMaxErr, "cuDNN: An incorrect parameter was passed to a function (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_INTERNAL_ERROR: _snprintf(szErr, lMaxErr, "cuDNN: An internal operation failed (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_INVALID_VALUE: _snprintf(szErr, lMaxErr, "cuDNN: An invalid value was detected (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_ARCH_MISMATCH: _snprintf(szErr, lMaxErr, "cuDNN: The function requires a feature not supported by the current GPU device - your device must have compute capability of 3.0 or greater (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_MAPPING_ERROR: _snprintf(szErr, lMaxErr, "cuDNN: An access to the GPU's memory space failed perhaps caused when binding to a texture (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_EXECUTION_FAILED: _snprintf(szErr, lMaxErr, "cuDNN: The current GPU program failed to execute (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_NOT_SUPPORTED: _snprintf(szErr, lMaxErr, "cuDNN: The functionality requested is not supported by this version of cuDNN (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_LICENSE_ERROR: _snprintf(szErr, lMaxErr, "cuDNN: The functionality requested requires a license that does not appear to exist (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING: _snprintf(szErr, lMaxErr, "cuDNN: The runtime library required by RNN calls (nvcuda.dll) cannot be found (%ld), Kernel = %ld", lErr, lKernel); return true; #if CUDNN_MAJOR >= 7 case CUDNN_STATUS_RUNTIME_IN_PROGRESS: _snprintf(szErr, lMaxErr, "cuDNN: Some tasks in the user stream are still running (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_RUNTIME_FP_OVERFLOW: _snprintf(szErr, lMaxErr, "cuDNN: A numerical overflow occurred while executing the GPU kernel (%ld), Kernel = %ld", lErr, lKernel); return true; #endif } return false; } switch (lErr) { case hipErrorMissingConfiguration: _snprintf(szErr, lMaxErr, "CUDA: Missing configuration error (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorMemoryAllocation: _snprintf(szErr, lMaxErr, "CUDA: Memory allocation error (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInitializationError: _snprintf(szErr, lMaxErr, "CUDA: Initialization error (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorLaunchFailure: _snprintf(szErr, lMaxErr, "CUDA: Launch failure (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorPriorLaunchFailure: _snprintf(szErr, lMaxErr, "CUDA: Prior launch failure (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorLaunchTimeOut: _snprintf(szErr, lMaxErr, "CUDA: Prior launch failure - timeout (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorLaunchOutOfResources: _snprintf(szErr, lMaxErr, "CUDA: Launch out of resources error (%ld), Kernel = %ld", lErr, lKernel); 
return true; case hipErrorInvalidDeviceFunction: _snprintf(szErr, lMaxErr, "CUDA: Invalid device function (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidConfiguration: _snprintf(szErr, lMaxErr, "CUDA: Invalid configuration for the device used (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidDevice: _snprintf(szErr, lMaxErr, "CUDA: Invalid CUDA device (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidValue: _snprintf(szErr, lMaxErr, "CUDA: Invalid parameter value (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidPitchValue: _snprintf(szErr, lMaxErr, "CUDA: Invalid pitch parameter value (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidSymbol: _snprintf(szErr, lMaxErr, "CUDA: Invalid symbol (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorMapFailed: _snprintf(szErr, lMaxErr, "CUDA: Map buffer object failed (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorUnmapFailed: _snprintf(szErr, lMaxErr, "CUDA: Unmap buffer object failed (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidHostPointer: _snprintf(szErr, lMaxErr, "CUDA: Invalid host pointer (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidDevicePointer: _snprintf(szErr, lMaxErr, "CUDA: Invalid device pointer (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidTexture: _snprintf(szErr, lMaxErr, "CUDA: Invalid texture (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidTextureBinding: _snprintf(szErr, lMaxErr, "CUDA: Invalid texture binding (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidChannelDescriptor: _snprintf(szErr, lMaxErr, "CUDA: Invalid channel descriptor (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidMemcpyDirection: _snprintf(szErr, lMaxErr, "CUDA: Invalid memcpy direction (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorAddressOfConstant: _snprintf(szErr, lMaxErr, "CUDA: Address of constant error (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorTextureFetchFailed: _snprintf(szErr, lMaxErr, "CUDA: Texture fetch failed (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorTextureNotBound: _snprintf(szErr, lMaxErr, "CUDA: Texture not bound error (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorSynchronizationError: _snprintf(szErr, lMaxErr, "CUDA: Synchronization error (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidFilterSetting: _snprintf(szErr, lMaxErr, "CUDA: Invalid filter setting (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidNormSetting: _snprintf(szErr, lMaxErr, "CUDA: Invalid norm setting (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorMixedDeviceExecution: _snprintf(szErr, lMaxErr, "CUDA: Mixed device execution (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorDeinitialized: _snprintf(szErr, lMaxErr, "CUDA: cuda runtime unloading (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorUnknown: _snprintf(szErr, lMaxErr, "CUDA: Unknown error condition (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorNotYetImplemented: _snprintf(szErr, lMaxErr, "CUDA: Function not yet implemented (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorMemoryValueTooLarge: _snprintf(szErr, lMaxErr, "CUDA: Memory value too large (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidResourceHandle: 
_snprintf(szErr, lMaxErr, "CUDA: Invalid resource handle (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorNotReady: _snprintf(szErr, lMaxErr, "CUDA: Not ready error (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInsufficientDriver: _snprintf(szErr, lMaxErr, "CUDA: cuda runtime is newer than the installed NVIDIA CUDA driver (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorSetOnActiveProcess: _snprintf(szErr, lMaxErr, "CUDA: Set on active process error (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidSurface: _snprintf(szErr, lMaxErr, "CUDA: Indicates that the surface parameter is invalid (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorNoDevice: _snprintf(szErr, lMaxErr, "CUDA: No available CUDA device (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorECCNotCorrectable: _snprintf(szErr, lMaxErr, "CUDA: Uncorrectable ECC error detected (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorSharedObjectSymbolNotFound: _snprintf(szErr, lMaxErr, "CUDA: The link to to a shared object failed to resolve (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorSharedObjectInitFailed: _snprintf(szErr, lMaxErr, "CUDA: The initialization of a shared object failed (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorUnsupportedLimit: _snprintf(szErr, lMaxErr, "CUDA: The ::hipLimit_t argument is not supported by the active device (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorDuplicateVariableName: _snprintf(szErr, lMaxErr, "CUDA: Inidcates that multiple global or constant variables share the same string name (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorDuplicateTextureName: _snprintf(szErr, lMaxErr, "CUDA: Inidcates that multiple texture variables share the same string name (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorDuplicateSurfaceName: _snprintf(szErr, lMaxErr, "CUDA: Inidcates that multiple surface variables share the same string name (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorDevicesUnavailable: _snprintf(szErr, lMaxErr, "CUDA: Indicates that all CUDA devices are busy or unavailable at the current time (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidImage: _snprintf(szErr, lMaxErr, "CUDA: Indicates that the device kernel image is invalid (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorNoBinaryForGpu: _snprintf(szErr, lMaxErr, "CUDA: Indicates that there is no kernel image available that is suitable for the device (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorIncompatibleDriverContext: _snprintf(szErr, lMaxErr, "CUDA: Indicates that the current context is not compatible with this CUDA Runtime (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorPeerAccessAlreadyEnabled: _snprintf(szErr, lMaxErr, "CUDA: Indicates that a call to ::hipDeviceEnablePeerAccess is trying to re-enable peer addressing from a context that already has peer addressing enabled (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorPeerAccessNotEnabled: _snprintf(szErr, lMaxErr, "CUDA: Indicates that ::hipDeviceDisablePeerAccess is trying to disable peer addressing which has not been enabled yet (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorDeviceAlreadyInUse: _snprintf(szErr, lMaxErr, "CUDA: Indicates that a call tried to access an exclusive-thread device that is already in use by a different thread (%ld), Kernel = %ld", lErr, lKernel); 
return true; case hipErrorProfilerDisabled: _snprintf(szErr, lMaxErr, "CUDA: Indicates profiler is not initialized for this run (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorAssert: _snprintf(szErr, lMaxErr, "CUDA: An assert triggered in device code during kernel execution (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorTooManyPeers: _snprintf(szErr, lMaxErr, "CUDA: Indicates that the hardware resources required ot enable peer access have been exhaused for one or more of the devices (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorHostMemoryAlreadyRegistered: _snprintf(szErr, lMaxErr, "CUDA: Indicates that the memory range specified has already been registered (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorHostMemoryNotRegistered: _snprintf(szErr, lMaxErr, "CUDA: Indicates that the pointer specified does not correspond to any currently registered memory region (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorOperatingSystem: _snprintf(szErr, lMaxErr, "CUDA: Indicates that an OS call failed (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorPeerAccessUnsupported: _snprintf(szErr, lMaxErr, "CUDA: Indicates that P2P access is not supported across the given devices (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorLaunchMaxDepthExceeded: _snprintf(szErr, lMaxErr, "CUDA: Indicates that a device runtime grid launch did not occur because the depth of the child grid would exceed the maximum supported number of nested grid launches (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorLaunchFileScopedTex: _snprintf(szErr, lMaxErr, "CUDA: Indicates that a grid launch did no occur because the kernel uses file-scoped textures which are unsupported by the device runtime (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorLaunchFileScopedSurf: _snprintf(szErr, lMaxErr, "CUDA: Indicates that a grid launch did not occur because the kernel uses file-scoped surfaces which are unsupported by the device runtime. 
(%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorSyncDepthExceeded: _snprintf(szErr, lMaxErr, "CUDA: Indicates that a call to ::hipDeviceSynchronize made from the device runtime failed becaue the call was made at grid depth greater than either the default (2 levels) or a user specified limit (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorLaunchPendingCountExceeded: _snprintf(szErr, lMaxErr, "CUDA: Indicates that a device runtime grid launch failed because the launch would exceed the limit ::hipLimitDevRuntimePendingLaunchCount (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorNotPermitted: _snprintf(szErr, lMaxErr, "CUDA: Indicates the attempted operation is not permitted (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorNotSupported: _snprintf(szErr, lMaxErr, "CUDA: Indicates the attempted operation is not supported on the current system or device (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorHardwareStackError: _snprintf(szErr, lMaxErr, "CUDA: Device encountered an error in the call statck during kernel execution possibly due to stack corruption or exceeding the stack size limit (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorIllegalInstruction: _snprintf(szErr, lMaxErr, "CUDA: Device encountered an illegal instruction during kernel execution (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorMisalignedAddress: _snprintf(szErr, lMaxErr, "CUDA: Device encountered a load or storage instruction on a memory address which is not aligned (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidAddressSpace: _snprintf(szErr, lMaxErr, "CUDA: While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied an address not in those spaces (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidPc: _snprintf(szErr, lMaxErr, "CUDA: Device encountered an invalid program counter (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorIllegalAddress: _snprintf(szErr, lMaxErr, "CUDA: Device encountered a load or storage instruction on an invalid memory address (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidKernelFile: _snprintf(szErr, lMaxErr, "CUDA: A PTX compilation failed (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorInvalidGraphicsContext: _snprintf(szErr, lMaxErr, "CUDA: Indicates an error with the OpenGL or DirectX context (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorNvlinkUncorrectable: _snprintf(szErr, lMaxErr, "CUDA: Indicates an uncorrectable NVLink error was detected during the execution (%ld), Kernel = %ld", lErr, lKernel); return true; case hipErrorStartupFailure: _snprintf(szErr, lMaxErr, "CUDA: Startup failure (%ld), Kernel = %ld", lErr, lKernel); return true; } return false; } //============================================================================= // Device Functions //============================================================================= //end util.cu
02eafc598281b5b4c3405a0e4ebbeed106e69a12.cu
//============================================================================= // FILE: util.cu // // DESC: This file implements the utility functions. //============================================================================= #include "util.h" //============================================================================= // Constants //============================================================================= //============================================================================= // Public Methods //============================================================================= inline double cint(double x) { double dfInt = 0; if (modf(x, &dfInt) >= 0.5) return (x >= 0) ? ceil(x) : floor(x); else return (x < 0) ? ceil(x) : floor(x); } inline double round2(double r, int places) { double off = pow(10.0, places); return cint(r*off)/off; } inline double roundex(double r) { char sz[256]; sprintf(sz, "%0.7lf", r); return atof(sz); } bool GetErrorString(long lKernel, long lErr, char* szErr, long lMaxErr) { if (GetCudaErrorString(lKernel, lErr, szErr, lMaxErr)) return true; switch (lErr) { case ERROR_DLL_NOT_INIT: _snprintf(szErr, lMaxErr, "DLL: The DLL is not initialized (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_DEVICE_NOT_INITIALIZED: _snprintf(szErr, lMaxErr, "DEVICE: The device is not initialized error (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_PARAM: _snprintf(szErr, lMaxErr, "GENERAL: Parameter error (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_PARAM_OUT_OF_RANGE: _snprintf(szErr, lMaxErr, "GENERAL: Parameter out of range (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_PARAM_NULL: _snprintf(szErr, lMaxErr, "GENERAL: Parameter is NULL (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_FILE_WRITE: _snprintf(szErr, lMaxErr, "GENERAL: Failure when writing to file (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_FILE_READ: _snprintf(szErr, lMaxErr, "GENERAL: Failure when reading from file (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_FILE_OPEN: _snprintf(szErr, lMaxErr, "GENERAL: Failure when opening a file (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MATRIX: _snprintf(szErr, lMaxErr, "MATRIX: general matrix error (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MEMORY_EXPECTED_DEVICE: _snprintf(szErr, lMaxErr, "MEMORY: Expected device memory but received host memory (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MEMORY_RANGE_EXCEEDED: _snprintf(szErr, lMaxErr, "MEMORY: Exceeded the maximum amount of memory size available as a chunk (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MEMORY_MIXED_HALF_TYPES: _snprintf(szErr, lMaxErr, "MEMORY: You are using a mix of half types and non-half types. All types for this function must be of the same type (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MEMORY_HALF_TYPE_NOT_SUPPORTED: _snprintf(szErr, lMaxErr, "MEMORY: The GPU that you are using has limited half-type support. Full half-type support is only available on Maxwell gpu's with compute 5.3 and above (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MEMORY_OUT: _snprintf(szErr, lMaxErr, "MEMORY: Out of memory (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MEMORY_NOT_FOUND: _snprintf(szErr, lMaxErr, "MEMORY: Memory was not found and therefore could not be freed. 
(%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_BATCH_TOO_SMALL: _snprintf(szErr, lMaxErr, "DATA: The batch size used is too small - not enough label variety for sequencing. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MEMORY_TOO_SMALL: _snprintf(szErr, lMaxErr, "MEMORY: Memory size allocated is too small - must allocate more memory for this operation. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MATRIX_DIMENSIONS_DONT_MATCH: _snprintf(szErr, lMaxErr, "MATRIX: matrix dimensions do not match (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MATRIX_DIMENSIONS_EXCEED_THREADS: _snprintf(szErr, lMaxErr, "MATRIX: matrix dimensions exceed number of threads (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_MATRIX_NOT_SQUARE: _snprintf(szErr, lMaxErr, "MATRIX: the current operation is only supported on square matrices (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_VECTOR: _snprintf(szErr, lMaxErr, "VECTOR: general vector error (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_VECTOR_DIMENSIONS_DONT_MATCH: _snprintf(szErr, lMaxErr, "VECTOR: vector dimensions do not match (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_NN: _snprintf(szErr, lMaxErr, "NN: general neural net error (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_NN_LAYER_COUNTS_DONT_MATCH: _snprintf(szErr, lMaxErr, "NN: layer counts do not match (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_CUBLAS_NULL: _snprintf(szErr, lMaxErr, "NN: The cublas handle is NULL! (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_CUDA_NOTSUPPORED_ON_DISPLAYGPU: _snprintf(szErr, lMaxErr, "CUDA: The function you are attempting to run is not supported on the display GPU (only supported on headless gpus)! (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_CUDA_MISSING_NCCL64DLL: _snprintf(szErr, lMaxErr, "CUDA: The 'nccl64' DLL is missing from the executable directory! For example when using the version 134 for CUDA 10.0, the file 'nccl64_134.10.0.dll' should be in the same directory as the executable. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_CUDA_KERNEL_NOT_IMPLEMENTED: _snprintf(szErr, lMaxErr, "CUDA: The kernel or specific function specified is not implemented yet! (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_TSNE: _snprintf(szErr, lMaxErr, "TSNE: A general TSN-E error occurred. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_TSNE_NO_DISTANCES_FOUND: _snprintf(szErr, lMaxErr, "TSNE: No differences found between the images - they may all be the same. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD: _snprintf(szErr, lMaxErr, "SSD: A general SSD error occurred. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_NOT_INITIALIZED: _snprintf(szErr, lMaxErr, "SSD: The SSD is not initialized. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_CODE_TYPE: _snprintf(szErr, lMaxErr, "SSD: The SSD code type specified is invalid. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_BBOX_DIMENSION: _snprintf(szErr, lMaxErr, "SSD: The SSD bbox dimension (width or height) is invalid (e.g. < 0). (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_HOST_TYPE_NOT_SUPPORTED: _snprintf(szErr, lMaxErr, "SSD: The HOST type specified is not supported for this function. 
(%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_BAD_MATCH: _snprintf(szErr, lMaxErr, "SSD: The current matching is bad, expected a match index of -1. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_GT_LABEL_OUT_OF_RANGE: _snprintf(szErr, lMaxErr, "SSD: The ground truth label is out of range. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_PRIOR_VARIANCE_COUNT: _snprintf(szErr, lMaxErr, "SSD: The prior variances count does not match the prior bbox count. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_BACKGROUND_LABEL_OUT_OF_RANGE: _snprintf(szErr, lMaxErr, "SSD: The background label id is out of range. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_COMPUTE_CONF_LOSS_MATCH_INDEX_INCORRECT: _snprintf(szErr, lMaxErr, "SSD: The match_index should equal the number of priors in the compute conf loss calculation. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_COMPUTE_CONF_LOSS_GT_MISSING_ITEM: _snprintf(szErr, lMaxErr, "SSD: The ground-truths are missing an expected itemId in the compute conf loss calculation. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_COMPUTE_CONF_LOSS_MATCH_INDEX_OUT_OF_RANGE: _snprintf(szErr, lMaxErr, "SSD: The match index is out of range of the ground-truths in the compute conf loss calculation. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_COMPUTE_CONF_LOSS_INVALID_LABEL: _snprintf(szErr, lMaxErr, "SSD: The label in the compute conf loss calculation is invalid. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_NOT_SUPPORTED_IN_HALF_BBOX: _snprintf(szErr, lMaxErr, "SSD: The requested query is not supported by the half Bbox - only full BBox's support this type of query. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_LOC_PRED_LABEL_NOT_FOUND: _snprintf(szErr, lMaxErr, "SSD: Could not find an expected label in the loc predictions. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_SAMPLE_SIZE_TOO_SMALL: _snprintf(szErr, lMaxErr, "SSD: The sample size is too small and must be > 0. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_NUM_CLASSES: _snprintf(szErr, lMaxErr, "SSD: The number of classes is incorrect (e.g. when using map to agnostic, only 2 classes are valid for backgroundLabel >= 0, otherwise only 1 class is valid). (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_CONF_LOSS_TYPE: _snprintf(szErr, lMaxErr, "SSD: The conf loss type is unknown and invalid. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_BACKGROUND_LABEL_IN_DATASET: _snprintf(szErr, lMaxErr, "SSD: The ground truth was found in the dataset. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_NUMLOCCLASSES_FOR_SHARED: _snprintf(szErr, lMaxErr, "SSD: The number of loc classes must be 1 when using shared location. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_LOCCOUNT_GTCOUNT: _snprintf(szErr, lMaxErr, "SSD: The loc pred and loc gt must be equal. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_LOC_LOSS_MATCH_COUNT: _snprintf(szErr, lMaxErr, "SSD: The loc loss match count is incorrect. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_INVALID_LOC_LOSS_TYPE: _snprintf(szErr, lMaxErr, "SSD: The loc loss type is invalid. 
(%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_SSD_MINEHARDEXAMPLES_NO_MATCHES: _snprintf(szErr, lMaxErr, "SSD: No matches were found to mine hard examples. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_LAYERNORM: _snprintf(szErr, lMaxErr, "LAYERNORM: A general LayerNorm error occurred. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_LAYERNORM_NOT_INITIALIZED: _snprintf(szErr, lMaxErr, "LAYERNORM: The LayerNorm is not initialized. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_RNN8: _snprintf(szErr, lMaxErr, "RNN8: A general Rnn8 error occurred. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_RNN8_NOT_INITIALIZED: _snprintf(szErr, lMaxErr, "RNN8: The RNN8 is not initialized. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_RNN8_INCOMPATIBLE_CUDNN_VER: _snprintf(szErr, lMaxErr, "RNN8: The RNN8 requires cuDNN version 8.0+ to run. (%ld), Kernel = %ld", lErr, lKernel); return true; case ERROR_RNN8_INVALID_FILLER: _snprintf(szErr, lMaxErr, "RNN8: The RNN8 does not support the filler specified. (%ld), Kernel = %ld", lErr, lKernel); return true; } return false; } bool GetCudaErrorString(long lKernel, long lErr, char* szErr, long lMaxErr) { if (lErr == 0) return false; if ((lErr & ERROR_CUBLAS_OFFSET) == ERROR_CUBLAS_OFFSET) { lErr &= (~ERROR_CUBLAS_OFFSET); switch (lErr) { case CUBLAS_STATUS_NOT_INITIALIZED: _snprintf(szErr, lMaxErr, "cuBlas: The cuBlas library was not initialized propertly (%ld), Kernel = %ld", lErr, lKernel); return true; case CUBLAS_STATUS_ALLOC_FAILED: _snprintf(szErr, lMaxErr, "cuBlas: A resource allocation failed within the cuBlas library (%ld), Kernel = %ld", lErr, lKernel); return true; case CUBLAS_STATUS_INVALID_VALUE: _snprintf(szErr, lMaxErr, "cuBlas: An invalid parameter was passed to the function. (%ld), Kernel = %ld", lErr, lKernel); return true; case CUBLAS_STATUS_ARCH_MISMATCH: _snprintf(szErr, lMaxErr, "cuBlas: The function requires functionality not supported by the current device architecture. (%ld), Kernel = %ld", lErr, lKernel); return true; case CUBLAS_STATUS_MAPPING_ERROR: _snprintf(szErr, lMaxErr, "cuBlas: Access to the GPU memory failed possibly caused by a failure to bind to a texture. (%ld), Kernel = %ld", lErr, lKernel); return true; case CUBLAS_STATUS_EXECUTION_FAILED: _snprintf(szErr, lMaxErr, "cuBlas: A cuBlas GPU kernel failed to execute. (%ld), Kernel = %ld", lErr, lKernel); return true; case CUBLAS_STATUS_INTERNAL_ERROR: _snprintf(szErr, lMaxErr, "cuBlas: A failure occurred within cuBlas. (%ld), Kernel = %ld", lErr, lKernel); return true; case CUBLAS_STATUS_NOT_SUPPORTED: _snprintf(szErr, lMaxErr, "cuBlas: The function called is not supported. (%ld), Kernel = %ld", lErr, lKernel); return true; case CUBLAS_STATUS_LICENSE_ERROR: _snprintf(szErr, lMaxErr, "cuBlas: The functionality requested requires a license that is missing. 
(%ld), Kernel = %ld", lErr, lKernel); return true; } } else if ((lErr & ERROR_CUDNN_OFFSET) == ERROR_CUDNN_OFFSET) { lErr &= (~ERROR_CUDNN_OFFSET); switch (lErr) { case CUDNN_STATUS_NOT_INITIALIZED: _snprintf(szErr, lMaxErr, "cuDNN: The cuDNN library was not initialized propertly (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_ALLOC_FAILED: _snprintf(szErr, lMaxErr, "cuDNN: A resource allocation failed within the cuDNN library (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_BAD_PARAM: _snprintf(szErr, lMaxErr, "cuDNN: An incorrect parameter was passed to a function (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_INTERNAL_ERROR: _snprintf(szErr, lMaxErr, "cuDNN: An internal operation failed (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_INVALID_VALUE: _snprintf(szErr, lMaxErr, "cuDNN: An invalid value was detected (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_ARCH_MISMATCH: _snprintf(szErr, lMaxErr, "cuDNN: The function requires a feature not supported by the current GPU device - your device must have compute capability of 3.0 or greater (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_MAPPING_ERROR: _snprintf(szErr, lMaxErr, "cuDNN: An access to the GPU's memory space failed perhaps caused when binding to a texture (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_EXECUTION_FAILED: _snprintf(szErr, lMaxErr, "cuDNN: The current GPU program failed to execute (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_NOT_SUPPORTED: _snprintf(szErr, lMaxErr, "cuDNN: The functionality requested is not supported by this version of cuDNN (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_LICENSE_ERROR: _snprintf(szErr, lMaxErr, "cuDNN: The functionality requested requires a license that does not appear to exist (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING: _snprintf(szErr, lMaxErr, "cuDNN: The runtime library required by RNN calls (nvcuda.dll) cannot be found (%ld), Kernel = %ld", lErr, lKernel); return true; #if CUDNN_MAJOR >= 7 case CUDNN_STATUS_RUNTIME_IN_PROGRESS: _snprintf(szErr, lMaxErr, "cuDNN: Some tasks in the user stream are still running (%ld), Kernel = %ld", lErr, lKernel); return true; case CUDNN_STATUS_RUNTIME_FP_OVERFLOW: _snprintf(szErr, lMaxErr, "cuDNN: A numerical overflow occurred while executing the GPU kernel (%ld), Kernel = %ld", lErr, lKernel); return true; #endif } return false; } switch (lErr) { case cudaErrorMissingConfiguration: _snprintf(szErr, lMaxErr, "CUDA: Missing configuration error (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorMemoryAllocation: _snprintf(szErr, lMaxErr, "CUDA: Memory allocation error (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInitializationError: _snprintf(szErr, lMaxErr, "CUDA: Initialization error (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorLaunchFailure: _snprintf(szErr, lMaxErr, "CUDA: Launch failure (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorPriorLaunchFailure: _snprintf(szErr, lMaxErr, "CUDA: Prior launch failure (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorLaunchTimeout: _snprintf(szErr, lMaxErr, "CUDA: Prior launch failure - timeout (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorLaunchOutOfResources: _snprintf(szErr, lMaxErr, "CUDA: Launch out of resources error (%ld), Kernel = %ld", lErr, 
lKernel); return true; case cudaErrorInvalidDeviceFunction: _snprintf(szErr, lMaxErr, "CUDA: Invalid device function (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidConfiguration: _snprintf(szErr, lMaxErr, "CUDA: Invalid configuration for the device used (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidDevice: _snprintf(szErr, lMaxErr, "CUDA: Invalid CUDA device (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidValue: _snprintf(szErr, lMaxErr, "CUDA: Invalid parameter value (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidPitchValue: _snprintf(szErr, lMaxErr, "CUDA: Invalid pitch parameter value (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidSymbol: _snprintf(szErr, lMaxErr, "CUDA: Invalid symbol (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorMapBufferObjectFailed: _snprintf(szErr, lMaxErr, "CUDA: Map buffer object failed (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorUnmapBufferObjectFailed: _snprintf(szErr, lMaxErr, "CUDA: Unmap buffer object failed (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidHostPointer: _snprintf(szErr, lMaxErr, "CUDA: Invalid host pointer (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidDevicePointer: _snprintf(szErr, lMaxErr, "CUDA: Invalid device pointer (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidTexture: _snprintf(szErr, lMaxErr, "CUDA: Invalid texture (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidTextureBinding: _snprintf(szErr, lMaxErr, "CUDA: Invalid texture binding (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidChannelDescriptor: _snprintf(szErr, lMaxErr, "CUDA: Invalid channel descriptor (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidMemcpyDirection: _snprintf(szErr, lMaxErr, "CUDA: Invalid memcpy direction (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorAddressOfConstant: _snprintf(szErr, lMaxErr, "CUDA: Address of constant error (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorTextureFetchFailed: _snprintf(szErr, lMaxErr, "CUDA: Texture fetch failed (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorTextureNotBound: _snprintf(szErr, lMaxErr, "CUDA: Texture not bound error (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorSynchronizationError: _snprintf(szErr, lMaxErr, "CUDA: Synchronization error (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidFilterSetting: _snprintf(szErr, lMaxErr, "CUDA: Invalid filter setting (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidNormSetting: _snprintf(szErr, lMaxErr, "CUDA: Invalid norm setting (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorMixedDeviceExecution: _snprintf(szErr, lMaxErr, "CUDA: Mixed device execution (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorCudartUnloading: _snprintf(szErr, lMaxErr, "CUDA: cuda runtime unloading (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorUnknown: _snprintf(szErr, lMaxErr, "CUDA: Unknown error condition (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorNotYetImplemented: _snprintf(szErr, lMaxErr, "CUDA: Function not yet implemented (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorMemoryValueTooLarge: _snprintf(szErr, lMaxErr, "CUDA: Memory value too large (%ld), Kernel = %ld", lErr, 
lKernel); return true; case cudaErrorInvalidResourceHandle: _snprintf(szErr, lMaxErr, "CUDA: Invalid resource handle (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorNotReady: _snprintf(szErr, lMaxErr, "CUDA: Not ready error (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInsufficientDriver: _snprintf(szErr, lMaxErr, "CUDA: cuda runtime is newer than the installed NVIDIA CUDA driver (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorSetOnActiveProcess: _snprintf(szErr, lMaxErr, "CUDA: Set on active process error (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidSurface: _snprintf(szErr, lMaxErr, "CUDA: Indicates that the surface parameter is invalid (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorNoDevice: _snprintf(szErr, lMaxErr, "CUDA: No available CUDA device (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorECCUncorrectable: _snprintf(szErr, lMaxErr, "CUDA: Uncorrectable ECC error detected (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorSharedObjectSymbolNotFound: _snprintf(szErr, lMaxErr, "CUDA: The link to to a shared object failed to resolve (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorSharedObjectInitFailed: _snprintf(szErr, lMaxErr, "CUDA: The initialization of a shared object failed (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorUnsupportedLimit: _snprintf(szErr, lMaxErr, "CUDA: The ::cudaLimit argument is not supported by the active device (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorDuplicateVariableName: _snprintf(szErr, lMaxErr, "CUDA: Inidcates that multiple global or constant variables share the same string name (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorDuplicateTextureName: _snprintf(szErr, lMaxErr, "CUDA: Inidcates that multiple texture variables share the same string name (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorDuplicateSurfaceName: _snprintf(szErr, lMaxErr, "CUDA: Inidcates that multiple surface variables share the same string name (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorDevicesUnavailable: _snprintf(szErr, lMaxErr, "CUDA: Indicates that all CUDA devices are busy or unavailable at the current time (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidKernelImage: _snprintf(szErr, lMaxErr, "CUDA: Indicates that the device kernel image is invalid (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorNoKernelImageForDevice: _snprintf(szErr, lMaxErr, "CUDA: Indicates that there is no kernel image available that is suitable for the device (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorIncompatibleDriverContext: _snprintf(szErr, lMaxErr, "CUDA: Indicates that the current context is not compatible with this CUDA Runtime (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorPeerAccessAlreadyEnabled: _snprintf(szErr, lMaxErr, "CUDA: Indicates that a call to ::cudaDeviceEnablePeerAccess is trying to re-enable peer addressing from a context that already has peer addressing enabled (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorPeerAccessNotEnabled: _snprintf(szErr, lMaxErr, "CUDA: Indicates that ::cudaDeviceDisablePeerAccess is trying to disable peer addressing which has not been enabled yet (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorDeviceAlreadyInUse: _snprintf(szErr, lMaxErr, "CUDA: Indicates that a call tried to access an 
exclusive-thread device that is already in use by a different thread (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorProfilerDisabled: _snprintf(szErr, lMaxErr, "CUDA: Indicates profiler is not initialized for this run (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorAssert: _snprintf(szErr, lMaxErr, "CUDA: An assert triggered in device code during kernel execution (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorTooManyPeers: _snprintf(szErr, lMaxErr, "CUDA: Indicates that the hardware resources required ot enable peer access have been exhaused for one or more of the devices (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorHostMemoryAlreadyRegistered: _snprintf(szErr, lMaxErr, "CUDA: Indicates that the memory range specified has already been registered (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorHostMemoryNotRegistered: _snprintf(szErr, lMaxErr, "CUDA: Indicates that the pointer specified does not correspond to any currently registered memory region (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorOperatingSystem: _snprintf(szErr, lMaxErr, "CUDA: Indicates that an OS call failed (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorPeerAccessUnsupported: _snprintf(szErr, lMaxErr, "CUDA: Indicates that P2P access is not supported across the given devices (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorLaunchMaxDepthExceeded: _snprintf(szErr, lMaxErr, "CUDA: Indicates that a device runtime grid launch did not occur because the depth of the child grid would exceed the maximum supported number of nested grid launches (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorLaunchFileScopedTex: _snprintf(szErr, lMaxErr, "CUDA: Indicates that a grid launch did no occur because the kernel uses file-scoped textures which are unsupported by the device runtime (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorLaunchFileScopedSurf: _snprintf(szErr, lMaxErr, "CUDA: Indicates that a grid launch did not occur because the kernel uses file-scoped surfaces which are unsupported by the device runtime. 
(%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorSyncDepthExceeded: _snprintf(szErr, lMaxErr, "CUDA: Indicates that a call to ::cudaDeviceSynchronize made from the device runtime failed becaue the call was made at grid depth greater than either the default (2 levels) or a user specified limit (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorLaunchPendingCountExceeded: _snprintf(szErr, lMaxErr, "CUDA: Indicates that a device runtime grid launch failed because the launch would exceed the limit ::cudaLimitDevRuntimePendingLaunchCount (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorNotPermitted: _snprintf(szErr, lMaxErr, "CUDA: Indicates the attempted operation is not permitted (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorNotSupported: _snprintf(szErr, lMaxErr, "CUDA: Indicates the attempted operation is not supported on the current system or device (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorHardwareStackError: _snprintf(szErr, lMaxErr, "CUDA: Device encountered an error in the call statck during kernel execution possibly due to stack corruption or exceeding the stack size limit (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorIllegalInstruction: _snprintf(szErr, lMaxErr, "CUDA: Device encountered an illegal instruction during kernel execution (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorMisalignedAddress: _snprintf(szErr, lMaxErr, "CUDA: Device encountered a load or storage instruction on a memory address which is not aligned (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidAddressSpace: _snprintf(szErr, lMaxErr, "CUDA: While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied an address not in those spaces (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidPc: _snprintf(szErr, lMaxErr, "CUDA: Device encountered an invalid program counter (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorIllegalAddress: _snprintf(szErr, lMaxErr, "CUDA: Device encountered a load or storage instruction on an invalid memory address (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidPtx: _snprintf(szErr, lMaxErr, "CUDA: A PTX compilation failed (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorInvalidGraphicsContext: _snprintf(szErr, lMaxErr, "CUDA: Indicates an error with the OpenGL or DirectX context (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorNvlinkUncorrectable: _snprintf(szErr, lMaxErr, "CUDA: Indicates an uncorrectable NVLink error was detected during the execution (%ld), Kernel = %ld", lErr, lKernel); return true; case cudaErrorStartupFailure: _snprintf(szErr, lMaxErr, "CUDA: Startup failure (%ld), Kernel = %ld", lErr, lKernel); return true; } return false; } //============================================================================= // Device Functions //============================================================================= //end util.cu
358f07fbbab3a6cd3ee4675b19e043a8511bd2bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<cuda_runtime.h> #include<stdlib.h> #include<math.h> #include<string.h> #include <math.h> #include<hiprand/hiprand.h> #define SIZE_POP 5000 //#define SIZE_IND 40 //Fixed length //#define SIZE_PL 10 #define N_GEN 200 #define P_CROSS 48 #define P_MUT 48 #define UPPER 254 #define LOWER 0 //Load data define thing function #define FILE_NAME "DataValues.csv" #define NUM_ROWS 40 #define NUM_COLUMS 2 #define COMA " ," int8_t randomRangeInt(int lower, int upper){ return ((int) round((rand() % (upper - lower +1))+lower)); } double randomRange(int lower, int upper){ return ((double) (rand() % (upper - lower +1))+lower); } void createInitRandPop(int8_t *pop){ for(int i = 0; i < SIZE_POP*4; i++){ *(pop + i) = round(randomRange(0, 254)); } } void printPopulation(int8_t *pop){ for(int i = 0; i < SIZE_POP;i++){ for(int j = 0; j < 4; j++){ printf("%d ", *(pop + i*4 + j)); } printf("\n"); } } void loadData(double *data){ //double data[NUM_ROWS][NUM_COLUMS]; FILE* data_file = fopen(FILE_NAME, "r"); char line[NUM_ROWS]; int i = 0; while(fgets(line, sizeof(line), data_file)){ char* tok = strtok(line, COMA); int j = 0; while(tok != NULL){ *(data + i*NUM_COLUMS + j) = atof(tok); //const char to double tok = strtok(NULL, COMA); j++; } i++; } } //Insertion sort algorithm void insertionSort(double *fit, int8_t *pop){ int arr_lenght = SIZE_POP; for(int k = 0; k < arr_lenght - 1; k++){ for(int i = k + 1; i < arr_lenght; i++){ if(*(fit + k) > *(fit + i)){ //helper variables int8_t p_hlp[4] = {0}; double f_hlp = 0; //switch fitness f_hlp = *(fit + k); *(fit + k) = *(fit + i); *(fit + i) = f_hlp; //switch individuals //Copy array[i] to hlp[] for(int j = 0; j < 4; j++){ p_hlp[j] = *(pop + k*4 + j); } for(int j = 0; j < 4; j++){ *(pop + k*4 + j) = *(pop + i*4 + j); } for(int j = 0; j < 4; j++){ *(pop + i*4 + j) = p_hlp[j]; } } } } } int *indDecToBin(int8_t a,int8_t b,int8_t c,int8_t d){ int *bin = (int *)malloc(sizeof(int)* 32); int i = 0; //printf("%d\n",a); int aa[8]; for(int j = 0; j < 8; j++){ if( a > 0){ aa[j] = a % 2; a = a / 2; } else{ aa[j] = 0; } //printf("%d",aa[j]); } //printf("\n"); for(int j = 7; j >= 0; j--){ // printf("%d",aa[j]); bin[i] = aa[j]; i++; } // printf("%d\n",b); int ab[8]; for(int j = 0; j < 8; j++){ if( b > 0){ ab[j] = b % 2; b = b / 2; } else{ ab[j] = 0; } // printf("%d",ab[j]); } //printf("\n"); for(int j = 7; j >= 0; j--){ // printf("%d",ab[j]); bin[i] = ab[j]; i++; } //printf("%d\n",c); int ac[8]; for(int j = 0; j < 8; j++){ if( c > 0){ ac[j] = c % 2; c = c / 2; } else{ ac[j] = 0; } // printf("%d",ac[j]); } //printf("\n"); for(int j = 7; j >= 0; j--){ // printf("%d",ac[j]); bin[i] = ac[j]; i++; } //printf("%d\n",d); int ad[8]; for(int j = 0; j < 8; j++){ if( d > 0){ ad[j] = d % 2; d = d / 2; } else{ ad[j] = 0; } // printf("%d",ad[j]); } //printf("\n"); for(int j = 7; j >= 0; j--){ // printf("%d",ad[j]); bin[i] = ad[j]; i++; } return bin; } int8_t get_a(int *ind){ int8_t a = 0; for(int i = 0; i < 8; i++){ a += ind[i]*pow(2,8-i); } return a; } int8_t get_b(int *ind){ int8_t x = 0; for(int i = 0; i < 8; i++){ x += ind[i+8]*pow(2,8-(i+1)); //printf("%d\n",ind[i+8]); } return x; } int8_t get_c(int *ind){ int8_t x = 0; for(int i = 0; i < 8; i++){ x += ind[i+16]*pow(2,8-(i+1)); //printf("%d\n",ind[i+16]); } return x; } int8_t get_d(int *ind){ int8_t x = 0; for(int i = 0; i < 8; i++){ x += ind[i+24]*pow(2,8-(i+1)); //printf("%d\n",ind[i+24]); } return x; } void geneticOperations(int8_t 
*pop, int8_t *new_pop){ //Mutation int n_best_m = round(((double)SIZE_POP / 100) * P_MUT); int i; for(i = 0; i < n_best_m; i++){ //random number between 0 - 4 int rand = round(randomRange(0, 32)); for(int j = 0; j < 4; j++){ *(new_pop + i*4 + j) = *(pop +i*4 + j); } int *ind = indDecToBin(*(new_pop +i*4 +0),*(new_pop +i*4 +1), *(new_pop +i*4 +2),*(new_pop +i*4 +3)); ; //printf("\n"); /*for(int j = 0; j < 32; j++){ printf("%d", ind[j]); }*/ if(ind[rand] == 1){ ind[rand] = 0; } else{ ind[rand] = 1; } /*for(int j = 0; j < 32; j++){ printf("%d", ind[j]); }*/ *(new_pop +i*4 +0) = get_a(ind); // printf("### a = %d\n", a); *(new_pop +i*4 +1) = get_b(ind); // printf("### d = %d\n", b); *(new_pop +i*4 +2) = get_c(ind); // printf("### c = %d\n", c); *(new_pop +i*4 +3) = get_d(ind); // printf("### d = %d\n", d); } //crossover int n_best_c = round(((double)SIZE_POP / 100) * P_CROSS) + n_best_m; int x = 0; /*printf("i = %d | n_best_m = %d | n_best_c = %d\n", i,n_best_m,n_best_c-n_best_m); printf("pop:\n"); printPopulation(new_pop);*/ for(i ; i < n_best_c; i = i + 2){ if((x+1) >= SIZE_POP){ x = 0; } int *ind1 = indDecToBin(*(pop +x*4 +0),*(pop +x*4 +1), *(pop +x*4 +2),*(pop +x*4 +3)); int *ind2 = indDecToBin(*(pop +(x+1)*4 +0),*(pop +(x+1)*4 +1), *(pop +(x+1)*4 +2),*(pop +(x+1)*4 +3)); int new_ind1[32] = {0}; int new_ind2[32] = {0}; int rand = round(randomRange(0, 32)); for(int k = 0; k < 32; k++){ if(k<rand){ new_ind1[k] = ind1[k]; new_ind2[k] = ind2[k]; } else{ new_ind1[k] = ind2[k]; new_ind2[k] = ind1[k]; } } x = x + 2; /*for(int b = 0; b < 32; b++){ printf("%d",ind1[b]); } printf("\n"); for(int b = 0; b < 32; b++){ printf("%d",ind2[b]); } printf("\n"); for(int b = 0; b < 32; b++){ printf("%d",new_ind1[b]); } printf("\n"); for(int b = 0; b < 32; b++){ printf("%d",new_ind2[b]); } printf("\n");*/ *(new_pop +i*4 +0) = get_a(new_ind1); // printf("### a = %d\n", a); *(new_pop +i*4 +1) = get_b(new_ind1); // printf("### d = %d\n", b); *(new_pop +i*4 +2) = get_c(new_ind1); // printf("### c = %d\n", c); *(new_pop +i*4 +3) = get_d(new_ind1); // printf("### d = %d\n", d); *(new_pop +(i+1)*4 +0) = get_a(new_ind2); // printf("### a = %d\n", a); *(new_pop +(i+1)*4 +1) = get_b(new_ind2); // printf("### d = %d\n", b); *(new_pop +(i+1)*4 +2) = get_c(new_ind2); // printf("### c = %d\n", c); *(new_pop +(i+1)*4 +3) = get_d(new_ind2); // printf("### d = %d\n", d); /* printf("i = %d | n_best_m = %d | n_best_c = %d | k = %d\n", i,n_best_m,n_best_c-n_best_m, rand); printf("pop:\n"); printPopulation(new_pop);*/ } //Reproduction int j = 0; for(i; i < SIZE_POP; i++){ for(int k = 0; k < 4; k++){ *(new_pop + i*4 + k) = *(pop +j*4 + k); } j++; } } __global__ void calculateFitness(const int8_t *pop, double *fit, const double *data){ int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < SIZE_POP){ double arr_polyn[NUM_ROWS]; for(int j = 0; j < NUM_ROWS; j++){ double out_polyn = 0; for (int k = 0; k < 4; k++){ out_polyn += (*(pop + i*4 + k)*(pow(*(data + j*2 + 0), ((double)(4-(k+1)))))); } arr_polyn[j] = out_polyn; } double fitness_function = 0; for(int j = 0; j < NUM_ROWS; j++){ fitness_function += pow((*(data + j*2 + 1) - arr_polyn[j]), 2); } *(fit + i) = fitness_function; } } int main(void){ //Inicialize random number generaion time_t t; srand((unsigned) time(&t)); //Load data from .csv file double *data = (double *)malloc(NUM_ROWS*NUM_COLUMS*sizeof(double)); loadData(data); //error code to check return values for CUDA calls hipError_t err = hipSuccess; //variable for size of population; size_t sizePop = 4 * SIZE_POP * 
sizeof(int8_t); size_t sizeFit = SIZE_POP * sizeof(double); //allocation of memory for population on host int8_t *population = (int8_t *)malloc(sizePop); int8_t *new_population = (int8_t *)malloc(sizePop); double *fitness = (double *)malloc(sizeFit); //verification of succesed allocation if(population == NULL || data == NULL || fitness == NULL || new_population == NULL){ fprintf(stderr, "Failed to allocate host memory\n"); exit(EXIT_FAILURE); } //allocating of memory for population on device int8_t *d_population = NULL; err = hipMalloc((void **)&d_population, sizePop); if(err != hipSuccess){ fprintf(stderr, "Failed allocate memory for population on device(error code %s)\n",hipGetErrorString(err)); exit(EXIT_FAILURE); } //allocating of memory for data on device double *d_data = NULL; err = hipMalloc((void **)&d_data, (NUM_ROWS*NUM_COLUMS*sizeof(double))); //checking succeded allocation on device memory if(err != hipSuccess){ fprintf(stderr, "Failed allocate memory for data on device(error code %s)\n",hipGetErrorString(err)); exit(EXIT_FAILURE); } //allocating of memory for fitness on device double *d_fitness = NULL; err = hipMalloc((void **)&d_fitness, sizeFit); //checking succeded allocation on device memory if(err != hipSuccess){ fprintf(stderr, "Failed allocate memory for fitness on device(error code %s)\n",hipGetErrorString(err)); exit(EXIT_FAILURE); } //BEGINING OF GA int e = 0; do{ e++; //-------- //Creating initial population createInitRandPop(population); //-------- //Checking termination criteria int gen = 0; while(gen != N_GEN){ //-------- //Evaluate fitness //copy population and data from host to device err = hipMemcpy(d_population, population, sizePop, hipMemcpyHostToDevice); if(err != hipSuccess){ fprintf(stderr, "Failed copy population to device errCode: %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_data, data, (NUM_ROWS*NUM_COLUMS*sizeof(double)), hipMemcpyHostToDevice); if(err != hipSuccess){ fprintf(stderr, "Failed copy data to device errCode: %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } int threadsPerBlock = 256; int blocksPerGrid = (SIZE_POP + threadsPerBlock - 1) / threadsPerBlock; //printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( calculateFitness), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_population, d_fitness, d_data); err = hipMemcpy(fitness, d_fitness, sizeFit, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy fitness from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipDeviceSynchronize(); //sort population insertionSort(fitness, population); /*printPopulation(population); for(int c = 0; c < SIZE_POP; c++){ printf("%f\n", fitness[c]); }*/ printf("%d, %d, %d, %d | fitness = %f || gen: %d\n", *(population + 0),*(population + 1), *(population + 2),*(population + 3),*(fitness + 0), gen); //apply genetioc operation geneticOperations(population, new_population); //copy new population to old population for(int i = 0; i < SIZE_POP; i++){ for(int j = 0; j < 4; j++){ *(population + i*4 +j) = *(new_population + i*4 + j); } } //-------- gen++; } }while(*(fitness) != 0); free(population); free(new_population); free(data); free(fitness); hipFree(d_population); hipFree(d_data); hipFree(d_fitness); return 0; }
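The fitness that calculateFitness writes per individual can be reproduced on the host for verification: the sketch below mirrors the kernel's cubic a*x^3 + b*x^2 + c*x + d and its squared-error accumulation over the data rows. The function name hostFitness is an assumption and not part of the original program.

#include <math.h>
#include <stdint.h>

// Host reference for one individual (ind = 4 coefficients), same math as the kernel.
double hostFitness(const int8_t *ind, const double *data, int num_rows)
{
    double fitness = 0.0;
    for (int j = 0; j < num_rows; j++) {
        double x = data[j * 2 + 0];
        double y = data[j * 2 + 1];
        double polyn = 0.0;
        for (int k = 0; k < 4; k++)               // exponents 3, 2, 1, 0
            polyn += ind[k] * pow(x, (double)(4 - (k + 1)));
        fitness += pow(y - polyn, 2.0);
    }
    return fitness;                               // should match fit[i] from the GPU
}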
358f07fbbab3a6cd3ee4675b19e043a8511bd2bc.cu
#include<stdio.h> #include<cuda_runtime.h> #include<stdlib.h> #include<math.h> #include<string.h> #include <math.h> #include<curand.h> #define SIZE_POP 5000 //#define SIZE_IND 40 //Fixed length //#define SIZE_PL 10 #define N_GEN 200 #define P_CROSS 48 #define P_MUT 48 #define UPPER 254 #define LOWER 0 //Load data define thing function #define FILE_NAME "DataValues.csv" #define NUM_ROWS 40 #define NUM_COLUMS 2 #define COMA " ," int8_t randomRangeInt(int lower, int upper){ return ((int) round((rand() % (upper - lower +1))+lower)); } double randomRange(int lower, int upper){ return ((double) (rand() % (upper - lower +1))+lower); } void createInitRandPop(int8_t *pop){ for(int i = 0; i < SIZE_POP*4; i++){ *(pop + i) = round(randomRange(0, 254)); } } void printPopulation(int8_t *pop){ for(int i = 0; i < SIZE_POP;i++){ for(int j = 0; j < 4; j++){ printf("%d ", *(pop + i*4 + j)); } printf("\n"); } } void loadData(double *data){ //double data[NUM_ROWS][NUM_COLUMS]; FILE* data_file = fopen(FILE_NAME, "r"); char line[NUM_ROWS]; int i = 0; while(fgets(line, sizeof(line), data_file)){ char* tok = strtok(line, COMA); int j = 0; while(tok != NULL){ *(data + i*NUM_COLUMS + j) = atof(tok); //const char to double tok = strtok(NULL, COMA); j++; } i++; } } //Insertion sort algorithm void insertionSort(double *fit, int8_t *pop){ int arr_lenght = SIZE_POP; for(int k = 0; k < arr_lenght - 1; k++){ for(int i = k + 1; i < arr_lenght; i++){ if(*(fit + k) > *(fit + i)){ //helper variables int8_t p_hlp[4] = {0}; double f_hlp = 0; //switch fitness f_hlp = *(fit + k); *(fit + k) = *(fit + i); *(fit + i) = f_hlp; //switch individuals //Copy array[i] to hlp[] for(int j = 0; j < 4; j++){ p_hlp[j] = *(pop + k*4 + j); } for(int j = 0; j < 4; j++){ *(pop + k*4 + j) = *(pop + i*4 + j); } for(int j = 0; j < 4; j++){ *(pop + i*4 + j) = p_hlp[j]; } } } } } int *indDecToBin(int8_t a,int8_t b,int8_t c,int8_t d){ int *bin = (int *)malloc(sizeof(int)* 32); int i = 0; //printf("%d\n",a); int aa[8]; for(int j = 0; j < 8; j++){ if( a > 0){ aa[j] = a % 2; a = a / 2; } else{ aa[j] = 0; } //printf("%d",aa[j]); } //printf("\n"); for(int j = 7; j >= 0; j--){ // printf("%d",aa[j]); bin[i] = aa[j]; i++; } // printf("%d\n",b); int ab[8]; for(int j = 0; j < 8; j++){ if( b > 0){ ab[j] = b % 2; b = b / 2; } else{ ab[j] = 0; } // printf("%d",ab[j]); } //printf("\n"); for(int j = 7; j >= 0; j--){ // printf("%d",ab[j]); bin[i] = ab[j]; i++; } //printf("%d\n",c); int ac[8]; for(int j = 0; j < 8; j++){ if( c > 0){ ac[j] = c % 2; c = c / 2; } else{ ac[j] = 0; } // printf("%d",ac[j]); } //printf("\n"); for(int j = 7; j >= 0; j--){ // printf("%d",ac[j]); bin[i] = ac[j]; i++; } //printf("%d\n",d); int ad[8]; for(int j = 0; j < 8; j++){ if( d > 0){ ad[j] = d % 2; d = d / 2; } else{ ad[j] = 0; } // printf("%d",ad[j]); } //printf("\n"); for(int j = 7; j >= 0; j--){ // printf("%d",ad[j]); bin[i] = ad[j]; i++; } return bin; } int8_t get_a(int *ind){ int8_t a = 0; for(int i = 0; i < 8; i++){ a += ind[i]*pow(2,8-i); } return a; } int8_t get_b(int *ind){ int8_t x = 0; for(int i = 0; i < 8; i++){ x += ind[i+8]*pow(2,8-(i+1)); //printf("%d\n",ind[i+8]); } return x; } int8_t get_c(int *ind){ int8_t x = 0; for(int i = 0; i < 8; i++){ x += ind[i+16]*pow(2,8-(i+1)); //printf("%d\n",ind[i+16]); } return x; } int8_t get_d(int *ind){ int8_t x = 0; for(int i = 0; i < 8; i++){ x += ind[i+24]*pow(2,8-(i+1)); //printf("%d\n",ind[i+24]); } return x; } void geneticOperations(int8_t *pop, int8_t *new_pop){ //Mutation int n_best_m = round(((double)SIZE_POP / 100) * P_MUT); int i; 
for(i = 0; i < n_best_m; i++){ //random number between 0 - 4 int rand = round(randomRange(0, 32)); for(int j = 0; j < 4; j++){ *(new_pop + i*4 + j) = *(pop +i*4 + j); } int *ind = indDecToBin(*(new_pop +i*4 +0),*(new_pop +i*4 +1), *(new_pop +i*4 +2),*(new_pop +i*4 +3)); ; //printf("\n"); /*for(int j = 0; j < 32; j++){ printf("%d", ind[j]); }*/ if(ind[rand] == 1){ ind[rand] = 0; } else{ ind[rand] = 1; } /*for(int j = 0; j < 32; j++){ printf("%d", ind[j]); }*/ *(new_pop +i*4 +0) = get_a(ind); // printf("### a = %d\n", a); *(new_pop +i*4 +1) = get_b(ind); // printf("### d = %d\n", b); *(new_pop +i*4 +2) = get_c(ind); // printf("### c = %d\n", c); *(new_pop +i*4 +3) = get_d(ind); // printf("### d = %d\n", d); } //crossover int n_best_c = round(((double)SIZE_POP / 100) * P_CROSS) + n_best_m; int x = 0; /*printf("i = %d | n_best_m = %d | n_best_c = %d\n", i,n_best_m,n_best_c-n_best_m); printf("pop:\n"); printPopulation(new_pop);*/ for(i ; i < n_best_c; i = i + 2){ if((x+1) >= SIZE_POP){ x = 0; } int *ind1 = indDecToBin(*(pop +x*4 +0),*(pop +x*4 +1), *(pop +x*4 +2),*(pop +x*4 +3)); int *ind2 = indDecToBin(*(pop +(x+1)*4 +0),*(pop +(x+1)*4 +1), *(pop +(x+1)*4 +2),*(pop +(x+1)*4 +3)); int new_ind1[32] = {0}; int new_ind2[32] = {0}; int rand = round(randomRange(0, 32)); for(int k = 0; k < 32; k++){ if(k<rand){ new_ind1[k] = ind1[k]; new_ind2[k] = ind2[k]; } else{ new_ind1[k] = ind2[k]; new_ind2[k] = ind1[k]; } } x = x + 2; /*for(int b = 0; b < 32; b++){ printf("%d",ind1[b]); } printf("\n"); for(int b = 0; b < 32; b++){ printf("%d",ind2[b]); } printf("\n"); for(int b = 0; b < 32; b++){ printf("%d",new_ind1[b]); } printf("\n"); for(int b = 0; b < 32; b++){ printf("%d",new_ind2[b]); } printf("\n");*/ *(new_pop +i*4 +0) = get_a(new_ind1); // printf("### a = %d\n", a); *(new_pop +i*4 +1) = get_b(new_ind1); // printf("### d = %d\n", b); *(new_pop +i*4 +2) = get_c(new_ind1); // printf("### c = %d\n", c); *(new_pop +i*4 +3) = get_d(new_ind1); // printf("### d = %d\n", d); *(new_pop +(i+1)*4 +0) = get_a(new_ind2); // printf("### a = %d\n", a); *(new_pop +(i+1)*4 +1) = get_b(new_ind2); // printf("### d = %d\n", b); *(new_pop +(i+1)*4 +2) = get_c(new_ind2); // printf("### c = %d\n", c); *(new_pop +(i+1)*4 +3) = get_d(new_ind2); // printf("### d = %d\n", d); /* printf("i = %d | n_best_m = %d | n_best_c = %d | k = %d\n", i,n_best_m,n_best_c-n_best_m, rand); printf("pop:\n"); printPopulation(new_pop);*/ } //Reproduction int j = 0; for(i; i < SIZE_POP; i++){ for(int k = 0; k < 4; k++){ *(new_pop + i*4 + k) = *(pop +j*4 + k); } j++; } } __global__ void calculateFitness(const int8_t *pop, double *fit, const double *data){ int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < SIZE_POP){ double arr_polyn[NUM_ROWS]; for(int j = 0; j < NUM_ROWS; j++){ double out_polyn = 0; for (int k = 0; k < 4; k++){ out_polyn += (*(pop + i*4 + k)*(pow(*(data + j*2 + 0), ((double)(4-(k+1)))))); } arr_polyn[j] = out_polyn; } double fitness_function = 0; for(int j = 0; j < NUM_ROWS; j++){ fitness_function += pow((*(data + j*2 + 1) - arr_polyn[j]), 2); } *(fit + i) = fitness_function; } } int main(void){ //Inicialize random number generaion time_t t; srand((unsigned) time(&t)); //Load data from .csv file double *data = (double *)malloc(NUM_ROWS*NUM_COLUMS*sizeof(double)); loadData(data); //error code to check return values for CUDA calls cudaError_t err = cudaSuccess; //variable for size of population; size_t sizePop = 4 * SIZE_POP * sizeof(int8_t); size_t sizeFit = SIZE_POP * sizeof(double); //allocation of memory for population on host 
int8_t *population = (int8_t *)malloc(sizePop); int8_t *new_population = (int8_t *)malloc(sizePop); double *fitness = (double *)malloc(sizeFit); //verification of succesed allocation if(population == NULL || data == NULL || fitness == NULL || new_population == NULL){ fprintf(stderr, "Failed to allocate host memory\n"); exit(EXIT_FAILURE); } //allocating of memory for population on device int8_t *d_population = NULL; err = cudaMalloc((void **)&d_population, sizePop); if(err != cudaSuccess){ fprintf(stderr, "Failed allocate memory for population on device(error code %s)\n",cudaGetErrorString(err)); exit(EXIT_FAILURE); } //allocating of memory for data on device double *d_data = NULL; err = cudaMalloc((void **)&d_data, (NUM_ROWS*NUM_COLUMS*sizeof(double))); //checking succeded allocation on device memory if(err != cudaSuccess){ fprintf(stderr, "Failed allocate memory for data on device(error code %s)\n",cudaGetErrorString(err)); exit(EXIT_FAILURE); } //allocating of memory for fitness on device double *d_fitness = NULL; err = cudaMalloc((void **)&d_fitness, sizeFit); //checking succeded allocation on device memory if(err != cudaSuccess){ fprintf(stderr, "Failed allocate memory for fitness on device(error code %s)\n",cudaGetErrorString(err)); exit(EXIT_FAILURE); } //BEGINING OF GA int e = 0; do{ e++; //-------- //Creating initial population createInitRandPop(population); //-------- //Checking termination criteria int gen = 0; while(gen != N_GEN){ //-------- //Evaluate fitness //copy population and data from host to device err = cudaMemcpy(d_population, population, sizePop, cudaMemcpyHostToDevice); if(err != cudaSuccess){ fprintf(stderr, "Failed copy population to device errCode: %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_data, data, (NUM_ROWS*NUM_COLUMS*sizeof(double)), cudaMemcpyHostToDevice); if(err != cudaSuccess){ fprintf(stderr, "Failed copy data to device errCode: %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } int threadsPerBlock = 256; int blocksPerGrid = (SIZE_POP + threadsPerBlock - 1) / threadsPerBlock; //printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); calculateFitness<<<blocksPerGrid, threadsPerBlock>>>(d_population, d_fitness, d_data); err = cudaMemcpy(fitness, d_fitness, sizeFit, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy fitness from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaDeviceSynchronize(); //sort population insertionSort(fitness, population); /*printPopulation(population); for(int c = 0; c < SIZE_POP; c++){ printf("%f\n", fitness[c]); }*/ printf("%d, %d, %d, %d | fitness = %f || gen: %d\n", *(population + 0),*(population + 1), *(population + 2),*(population + 3),*(fitness + 0), gen); //apply genetioc operation geneticOperations(population, new_population); //copy new population to old population for(int i = 0; i < SIZE_POP; i++){ for(int j = 0; j < 4; j++){ *(population + i*4 +j) = *(new_population + i*4 + j); } } //-------- gen++; } }while(*(fitness) != 0); free(population); free(new_population); free(data); free(fitness); cudaFree(d_population); cudaFree(d_data); cudaFree(d_fitness); return 0; }
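One detail worth noting in the listing above: get_a weights bit i by pow(2, 8-i), while get_b, get_c and get_d use pow(2, 8-(i+1)), so the first byte is rebuilt on a different scale than the other three after mutation or crossover. Below is a sketch of one consistent MSB-first decoder over the bin[] layout produced by indDecToBin (bit 0 of each 8-bit group is the most significant bit); the helper name decodeByte is hypothetical.

#include <stdint.h>

// Decode 8 bits starting at 'offset', most significant bit first, into the
// same int8_t range the population stores.
static int8_t decodeByte(const int *bits, int offset)
{
    int value = 0;
    for (int i = 0; i < 8; i++)
        value = (value << 1) | (bits[offset + i] & 1);
    return (int8_t)value;
}

// usage sketch: a = decodeByte(ind, 0);  b = decodeByte(ind, 8);
//               c = decodeByte(ind, 16); d = decodeByte(ind, 24);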
9d41a0b60af0704ffdc6f800ba00940771200162.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ /* Implementing Breadth first search on CUDA using algorithm given in DAC'10 paper "An Effective GPU Implementation of Breadth-First Search" Copyright (c) 2010 University of Illinois at Urbana-Champaign. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. Author: Lijiuan Luo ([email protected]) Revised for Parboil 2 Benchmark Suite by: Geng Daniel Liu ([email protected]) */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <deque> #include <iostream> #include "config.h" #include "parboil.h" FILE *fp; typedef int2 Node; typedef int2 Edge; #include "kernel.hip" const int h_top = 1; const int zero = 0; //////////////////////////////////////////////////////////////////////////////// // Main Program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { //the number of nodes in the graph int num_of_nodes = 0; //the number of edges in the graph int num_of_edges = 0; struct pb_Parameters *params; struct pb_TimerSet timers; pb_InitializeTimerSet(&timers); params = pb_ReadParameters(&argc, argv); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL)) { fprintf(stderr, "Expecting one input filename\n"); exit(-1); } pb_SwitchToTimer(&timers, pb_TimerID_IO); //Read in Graph from a file fp = fopen(params->inpFiles[0],"r"); if(!fp) { printf("Error Reading graph file\n"); return 0; } int source; fscanf(fp,"%d",&num_of_nodes); // allocate host memory Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*num_of_nodes); int *color = (int*) malloc(sizeof(int)*num_of_nodes); int start, edgeno; // initalize the memory for( unsigned int i = 0; i < num_of_nodes; i++) { fscanf(fp,"%d %d",&start,&edgeno); h_graph_nodes[i].x = start; h_graph_nodes[i].y = edgeno; color[i]=WHITE; } //read the source node from the file fscanf(fp,"%d",&source); fscanf(fp,"%d",&num_of_edges); int id,cost; Edge* h_graph_edges = (Edge*) malloc(sizeof(Edge)*num_of_edges); for(int i=0; i < num_of_edges ; i++) { fscanf(fp,"%d",&id); fscanf(fp,"%d",&cost); h_graph_edges[i].x = id; h_graph_edges[i].y = cost; } if(fp) fclose(fp); // allocate mem for the result on host side int* h_cost = (int*) malloc( sizeof(int)*num_of_nodes); for(int i = 0; i < num_of_nodes; i++){ h_cost[i] = INF; } h_cost[source] = 0; pb_SwitchToTimer(&timers, pb_TimerID_COPY); //Copy the Node list to device memory //Copy the Node list to device memory Node* d_graph_nodes; hipMalloc((void**) &d_graph_nodes, sizeof(Node)*num_of_nodes); hipMemcpy(d_graph_nodes, h_graph_nodes, sizeof(Node)*num_of_nodes, hipMemcpyHostToDevice); //Copy the Edge List to device Memory Edge* d_graph_edges; hipMalloc((void**) &d_graph_edges, sizeof(Edge)*num_of_edges); hipMemcpy(d_graph_edges, h_graph_edges, sizeof(Edge)*num_of_edges, hipMemcpyHostToDevice); int* d_color; hipMalloc((void**) &d_color, 
sizeof(int)*num_of_nodes); int* d_cost; hipMalloc((void**) &d_cost, sizeof(int)*num_of_nodes); int * d_q1; int * d_q2; hipMalloc((void**) &d_q1, sizeof(int)*num_of_nodes); hipMalloc((void**) &d_q2, sizeof(int)*num_of_nodes); int * tail; hipMalloc((void**) &tail, sizeof(int)); int *front_cost_d; hipMalloc((void**) &front_cost_d, sizeof(int)); hipMemcpy(d_color, color, sizeof(int)*num_of_nodes, hipMemcpyHostToDevice); hipMemcpy(d_cost, h_cost, sizeof(int)*num_of_nodes, hipMemcpyHostToDevice); //bind the texture memory with global memory hipBindTexture(0,g_graph_node_ref,d_graph_nodes, sizeof(Node)*num_of_nodes); hipBindTexture(0,g_graph_edge_ref,d_graph_edges,sizeof(Edge)*num_of_edges); printf("Starting GPU kernel\n"); hipDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); int num_of_blocks; int num_of_threads_per_block; hipMemcpy(tail,&h_top,sizeof(int),hipMemcpyHostToDevice); hipMemcpy(&d_cost[source],&zero,sizeof(int),hipMemcpyHostToDevice); hipMemcpy( &d_q1[0], &source, sizeof(int), hipMemcpyHostToDevice); int num_t;//number of threads int k=0;//BFS level index //whether or not to adjust "k", see comment on "BFS_kernel_multi_blk_inGPU" for more details int * switch_kd; hipMalloc((void**) &switch_kd, sizeof(int)); int * num_td;//number of threads hipMalloc((void**) &num_td, sizeof(int)); //whether to stay within a kernel, used in "BFS_kernel_multi_blk_inGPU" bool *stay; hipMalloc( (void**) &stay, sizeof(bool)); int switch_k; //max number of frontier nodes assigned to a block int * max_nodes_per_block_d; hipMalloc( (void**) &max_nodes_per_block_d, sizeof(int)); int *global_kt_d; hipMalloc( (void**) &global_kt_d, sizeof(int)); hipMemcpy(global_kt_d,&zero, sizeof(int),hipMemcpyHostToDevice); int h_overflow = 0; int *d_overflow; hipMalloc((void**) &d_overflow, sizeof(int)); hipMemcpy(d_overflow, &h_overflow, sizeof(int), hipMemcpyHostToDevice); do { hipMemcpy( &num_t, tail, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(tail,&zero,sizeof(int),hipMemcpyHostToDevice); if(num_t == 0){//frontier is empty hipFree(stay); hipFree(switch_kd); hipFree(num_td); break; } num_of_blocks = 1; num_of_threads_per_block = num_t; if(num_of_threads_per_block <NUM_BIN) num_of_threads_per_block = NUM_BIN; if(num_t>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(num_t/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } if(num_of_blocks == 1)//will call "BFS_in_GPU_kernel" num_of_threads_per_block = MAX_THREADS_PER_BLOCK; if(num_of_blocks >1 && num_of_blocks <= NUM_SM)// will call "BFS_kernel_multi_blk_inGPU" num_of_blocks = NUM_SM; //assume "num_of_blocks" can not be very large dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); if(k%2 == 0){ if(num_of_blocks == 1){ hipLaunchKernelGGL(( BFS_in_GPU_kernel), dim3(grid), dim3(threads) , 0, 0, d_q1,d_q2, d_graph_nodes, d_graph_edges, d_color, d_cost,num_t , tail,GRAY0,k,d_overflow); } else if(num_of_blocks <= NUM_SM){ (hipMemcpy(num_td,&num_t,sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( BFS_kernel_multi_blk_inGPU) , dim3(grid), dim3(threads) , 0, 0, d_q1,d_q2, d_graph_nodes, d_graph_edges, d_color, d_cost, num_td, tail,GRAY0,k, switch_kd, max_nodes_per_block_d, global_kt_d,d_overflow); (hipMemcpy(&switch_k,switch_kd, sizeof(int), hipMemcpyDeviceToHost)); if(!switch_k){ k--; } } else{ hipLaunchKernelGGL(( BFS_kernel), dim3(grid), dim3(threads) , 0, 0, d_q1,d_q2, d_graph_nodes, d_graph_edges, d_color, d_cost, num_t, tail,GRAY0,k,d_overflow); } } else{ if(num_of_blocks == 1){ 
hipLaunchKernelGGL(( BFS_in_GPU_kernel), dim3(grid), dim3(threads) , 0, 0, d_q2,d_q1, d_graph_nodes, d_graph_edges, d_color, d_cost, num_t, tail,GRAY1,k,d_overflow); } else if(num_of_blocks <= NUM_SM){ (hipMemcpy(num_td,&num_t,sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( BFS_kernel_multi_blk_inGPU) , dim3(grid), dim3(threads) , 0, 0, d_q2,d_q1, d_graph_nodes, d_graph_edges, d_color, d_cost, num_td, tail,GRAY1,k, switch_kd, max_nodes_per_block_d, global_kt_d,d_overflow); (hipMemcpy(&switch_k,switch_kd, sizeof(int), hipMemcpyDeviceToHost)); if(!switch_k){ k--; } } else{ hipLaunchKernelGGL(( BFS_kernel), dim3(grid), dim3(threads) , 0, 0, d_q2,d_q1, d_graph_nodes, d_graph_edges, d_color, d_cost, num_t, tail, GRAY1,k,d_overflow); } } k++; hipMemcpy(&h_overflow, d_overflow, sizeof(int), hipMemcpyDeviceToHost); if(h_overflow) { printf("Error: local queue was overflown. Need to increase W_LOCAL_QUEUE\n"); return 0; } } while(1); hipDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_COPY); printf("GPU kernel done\n"); // copy result from device to host hipMemcpy(h_cost, d_cost, sizeof(int)*num_of_nodes, hipMemcpyDeviceToHost); hipMemcpy(color, d_color, sizeof(int)*num_of_nodes, hipMemcpyDeviceToHost); hipUnbindTexture(g_graph_node_ref); hipUnbindTexture(g_graph_edge_ref); hipFree(d_graph_nodes); hipFree(d_graph_edges); hipFree(d_color); hipFree(d_cost); hipFree(tail); hipFree(front_cost_d); //Store the result into a file pb_SwitchToTimer(&timers, pb_TimerID_IO); FILE *fp = fopen(params->outFile,"w"); fprintf(fp, "%d\n", num_of_nodes); for(int i=0;i<num_of_nodes;i++) fprintf(fp,"%d %d\n",i,h_cost[i]); fclose(fp); // cleanup memory free( h_graph_nodes); free( h_graph_edges); free( color); free( h_cost); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); pb_FreeParameters(params); return 0; }
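The block/thread selection above is written out twice, once for each queue direction. The sketch below pulls that three-regime decision (single block, up to NUM_SM persistent blocks, or plain multi-block) into a standalone helper; it takes the constants as parameters so it reads independently of the kernel headers, and the struct and function names are assumptions.

// Mirror of the launch-shape logic in main(), as a reusable helper.
struct LaunchShape { int blocks; int threads; };

static LaunchShape chooseLaunch(int frontier, int num_bin, int max_threads, int num_sm)
{
    LaunchShape s;
    s.blocks  = 1;
    s.threads = (frontier < num_bin) ? num_bin : frontier;
    if (frontier > max_threads) {
        s.blocks  = (frontier + max_threads - 1) / max_threads;   // ceil division
        s.threads = max_threads;
    }
    if (s.blocks == 1)                 // single-block "BFS_in_GPU_kernel" path
        s.threads = max_threads;
    else if (s.blocks <= num_sm)       // persistent "BFS_kernel_multi_blk_inGPU" path
        s.blocks = num_sm;
    return s;
}

// usage sketch: LaunchShape s = chooseLaunch(num_t, NUM_BIN, MAX_THREADS_PER_BLOCK, NUM_SM);
//               dim3 grid(s.blocks, 1, 1), threads(s.threads, 1, 1);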
9d41a0b60af0704ffdc6f800ba00940771200162.cu
/*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ /* Implementing Breadth first search on CUDA using algorithm given in DAC'10 paper "An Effective GPU Implementation of Breadth-First Search" Copyright (c) 2010 University of Illinois at Urbana-Champaign. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. Author: Lijiuan Luo ([email protected]) Revised for Parboil 2 Benchmark Suite by: Geng Daniel Liu ([email protected]) */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <deque> #include <iostream> #include "config.h" #include "parboil.h" FILE *fp; typedef int2 Node; typedef int2 Edge; #include "kernel.cu" const int h_top = 1; const int zero = 0; //////////////////////////////////////////////////////////////////////////////// // Main Program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { //the number of nodes in the graph int num_of_nodes = 0; //the number of edges in the graph int num_of_edges = 0; struct pb_Parameters *params; struct pb_TimerSet timers; pb_InitializeTimerSet(&timers); params = pb_ReadParameters(&argc, argv); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL)) { fprintf(stderr, "Expecting one input filename\n"); exit(-1); } pb_SwitchToTimer(&timers, pb_TimerID_IO); //Read in Graph from a file fp = fopen(params->inpFiles[0],"r"); if(!fp) { printf("Error Reading graph file\n"); return 0; } int source; fscanf(fp,"%d",&num_of_nodes); // allocate host memory Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*num_of_nodes); int *color = (int*) malloc(sizeof(int)*num_of_nodes); int start, edgeno; // initalize the memory for( unsigned int i = 0; i < num_of_nodes; i++) { fscanf(fp,"%d %d",&start,&edgeno); h_graph_nodes[i].x = start; h_graph_nodes[i].y = edgeno; color[i]=WHITE; } //read the source node from the file fscanf(fp,"%d",&source); fscanf(fp,"%d",&num_of_edges); int id,cost; Edge* h_graph_edges = (Edge*) malloc(sizeof(Edge)*num_of_edges); for(int i=0; i < num_of_edges ; i++) { fscanf(fp,"%d",&id); fscanf(fp,"%d",&cost); h_graph_edges[i].x = id; h_graph_edges[i].y = cost; } if(fp) fclose(fp); // allocate mem for the result on host side int* h_cost = (int*) malloc( sizeof(int)*num_of_nodes); for(int i = 0; i < num_of_nodes; i++){ h_cost[i] = INF; } h_cost[source] = 0; pb_SwitchToTimer(&timers, pb_TimerID_COPY); //Copy the Node list to device memory //Copy the Node list to device memory Node* d_graph_nodes; cudaMalloc((void**) &d_graph_nodes, sizeof(Node)*num_of_nodes); cudaMemcpy(d_graph_nodes, h_graph_nodes, sizeof(Node)*num_of_nodes, cudaMemcpyHostToDevice); //Copy the Edge List to device Memory Edge* d_graph_edges; cudaMalloc((void**) &d_graph_edges, sizeof(Edge)*num_of_edges); cudaMemcpy(d_graph_edges, h_graph_edges, sizeof(Edge)*num_of_edges, cudaMemcpyHostToDevice); int* d_color; cudaMalloc((void**) &d_color, sizeof(int)*num_of_nodes); int* d_cost; cudaMalloc((void**) &d_cost, 
sizeof(int)*num_of_nodes); int * d_q1; int * d_q2; cudaMalloc((void**) &d_q1, sizeof(int)*num_of_nodes); cudaMalloc((void**) &d_q2, sizeof(int)*num_of_nodes); int * tail; cudaMalloc((void**) &tail, sizeof(int)); int *front_cost_d; cudaMalloc((void**) &front_cost_d, sizeof(int)); cudaMemcpy(d_color, color, sizeof(int)*num_of_nodes, cudaMemcpyHostToDevice); cudaMemcpy(d_cost, h_cost, sizeof(int)*num_of_nodes, cudaMemcpyHostToDevice); //bind the texture memory with global memory cudaBindTexture(0,g_graph_node_ref,d_graph_nodes, sizeof(Node)*num_of_nodes); cudaBindTexture(0,g_graph_edge_ref,d_graph_edges,sizeof(Edge)*num_of_edges); printf("Starting GPU kernel\n"); cudaThreadSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); int num_of_blocks; int num_of_threads_per_block; cudaMemcpy(tail,&h_top,sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(&d_cost[source],&zero,sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy( &d_q1[0], &source, sizeof(int), cudaMemcpyHostToDevice); int num_t;//number of threads int k=0;//BFS level index //whether or not to adjust "k", see comment on "BFS_kernel_multi_blk_inGPU" for more details int * switch_kd; cudaMalloc((void**) &switch_kd, sizeof(int)); int * num_td;//number of threads cudaMalloc((void**) &num_td, sizeof(int)); //whether to stay within a kernel, used in "BFS_kernel_multi_blk_inGPU" bool *stay; cudaMalloc( (void**) &stay, sizeof(bool)); int switch_k; //max number of frontier nodes assigned to a block int * max_nodes_per_block_d; cudaMalloc( (void**) &max_nodes_per_block_d, sizeof(int)); int *global_kt_d; cudaMalloc( (void**) &global_kt_d, sizeof(int)); cudaMemcpy(global_kt_d,&zero, sizeof(int),cudaMemcpyHostToDevice); int h_overflow = 0; int *d_overflow; cudaMalloc((void**) &d_overflow, sizeof(int)); cudaMemcpy(d_overflow, &h_overflow, sizeof(int), cudaMemcpyHostToDevice); do { cudaMemcpy( &num_t, tail, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(tail,&zero,sizeof(int),cudaMemcpyHostToDevice); if(num_t == 0){//frontier is empty cudaFree(stay); cudaFree(switch_kd); cudaFree(num_td); break; } num_of_blocks = 1; num_of_threads_per_block = num_t; if(num_of_threads_per_block <NUM_BIN) num_of_threads_per_block = NUM_BIN; if(num_t>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(num_t/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } if(num_of_blocks == 1)//will call "BFS_in_GPU_kernel" num_of_threads_per_block = MAX_THREADS_PER_BLOCK; if(num_of_blocks >1 && num_of_blocks <= NUM_SM)// will call "BFS_kernel_multi_blk_inGPU" num_of_blocks = NUM_SM; //assume "num_of_blocks" can not be very large dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); if(k%2 == 0){ if(num_of_blocks == 1){ BFS_in_GPU_kernel<<< grid, threads >>>(d_q1,d_q2, d_graph_nodes, d_graph_edges, d_color, d_cost,num_t , tail,GRAY0,k,d_overflow); } else if(num_of_blocks <= NUM_SM){ (cudaMemcpy(num_td,&num_t,sizeof(int), cudaMemcpyHostToDevice)); BFS_kernel_multi_blk_inGPU <<< grid, threads >>>(d_q1,d_q2, d_graph_nodes, d_graph_edges, d_color, d_cost, num_td, tail,GRAY0,k, switch_kd, max_nodes_per_block_d, global_kt_d,d_overflow); (cudaMemcpy(&switch_k,switch_kd, sizeof(int), cudaMemcpyDeviceToHost)); if(!switch_k){ k--; } } else{ BFS_kernel<<< grid, threads >>>(d_q1,d_q2, d_graph_nodes, d_graph_edges, d_color, d_cost, num_t, tail,GRAY0,k,d_overflow); } } else{ if(num_of_blocks == 1){ BFS_in_GPU_kernel<<< grid, threads >>>(d_q2,d_q1, d_graph_nodes, d_graph_edges, d_color, d_cost, num_t, tail,GRAY1,k,d_overflow); } else 
if(num_of_blocks <= NUM_SM){ (cudaMemcpy(num_td,&num_t,sizeof(int), cudaMemcpyHostToDevice)); BFS_kernel_multi_blk_inGPU <<< grid, threads >>>(d_q2,d_q1, d_graph_nodes, d_graph_edges, d_color, d_cost, num_td, tail,GRAY1,k, switch_kd, max_nodes_per_block_d, global_kt_d,d_overflow); (cudaMemcpy(&switch_k,switch_kd, sizeof(int), cudaMemcpyDeviceToHost)); if(!switch_k){ k--; } } else{ BFS_kernel<<< grid, threads >>>(d_q2,d_q1, d_graph_nodes, d_graph_edges, d_color, d_cost, num_t, tail, GRAY1,k,d_overflow); } } k++; cudaMemcpy(&h_overflow, d_overflow, sizeof(int), cudaMemcpyDeviceToHost); if(h_overflow) { printf("Error: local queue was overflown. Need to increase W_LOCAL_QUEUE\n"); return 0; } } while(1); cudaThreadSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_COPY); printf("GPU kernel done\n"); // copy result from device to host cudaMemcpy(h_cost, d_cost, sizeof(int)*num_of_nodes, cudaMemcpyDeviceToHost); cudaMemcpy(color, d_color, sizeof(int)*num_of_nodes, cudaMemcpyDeviceToHost); cudaUnbindTexture(g_graph_node_ref); cudaUnbindTexture(g_graph_edge_ref); cudaFree(d_graph_nodes); cudaFree(d_graph_edges); cudaFree(d_color); cudaFree(d_cost); cudaFree(tail); cudaFree(front_cost_d); //Store the result into a file pb_SwitchToTimer(&timers, pb_TimerID_IO); FILE *fp = fopen(params->outFile,"w"); fprintf(fp, "%d\n", num_of_nodes); for(int i=0;i<num_of_nodes;i++) fprintf(fp,"%d %d\n",i,h_cost[i]); fclose(fp); // cleanup memory free( h_graph_nodes); free( h_graph_edges); free( color); free( h_cost); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); pb_FreeParameters(params); return 0; }
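The level loop above ping-pongs between d_q1 and d_q2 via k % 2, which duplicates every dispatch branch. Below is a self-contained sketch of the same double-buffered frontier idea expressed as a pointer swap, with a toy expansion kernel standing in for the real BFS kernels from kernel.cu; the kernel, sizes and names are illustrative assumptions only.

#include <cuda_runtime.h>
#include <utility>

// Toy stand-in for frontier expansion: each input id emits one successor.
__global__ void toyExpand(const int *q_in, int *q_out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) q_out[i] = q_in[i] + 1;
}

int main()
{
    const int n = 1024, levels = 4;
    int *d_qa, *d_qb;
    cudaMalloc(&d_qa, n * sizeof(int));
    cudaMalloc(&d_qb, n * sizeof(int));
    cudaMemset(d_qa, 0, n * sizeof(int));

    int *q_in = d_qa, *q_out = d_qb;
    for (int k = 0; k < levels; k++) {
        toyExpand<<<(n + 255) / 256, 256>>>(q_in, q_out, n);
        std::swap(q_in, q_out);        // next level reads what this level wrote
    }
    cudaDeviceSynchronize();

    cudaFree(d_qa);
    cudaFree(d_qb);
    return 0;
}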
b9d6bdcd346db71f0b8b39a6a6fee5c9fa3c9d2a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>

using namespace std;

#define N 10 // rows
#define M 10 // columns
#define Num_threads_x 2
#define Num_threads_y 4
#define Num_elements 100

__global__ void addMatrixCUDA(const int *a, const int *b, int *c)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int i = y * N + x;
    if (i < Num_elements)
        c[i] = a[i] + b[i];
}

void addMatrix(int *a, int *b, int *c)
{
    int *dev_a;
    int *dev_b;
    int *dev_c;

    dim3 blocks(Num_elements/Num_threads_x, Num_elements/Num_threads_y);
    dim3 threads(Num_threads_x, Num_threads_y);

    hipMalloc((void**)&dev_c, Num_elements * sizeof(int));
    hipMalloc((void**)&dev_a, Num_elements * sizeof(int));
    hipMalloc((void**)&dev_b, Num_elements * sizeof(int));

    hipMemcpy(dev_a, a, Num_elements * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, Num_elements * sizeof(int), hipMemcpyHostToDevice);

    addMatrixCUDA<<<blocks, threads>>>(dev_a, dev_b, dev_c);
    hipDeviceSynchronize();

    hipMemcpy(c, dev_c, Num_elements * sizeof(int), hipMemcpyDeviceToHost);
}

int main()
{
    int a[Num_elements], b[Num_elements], c[Num_elements];

    for (int i = 0; i < Num_elements; i++) {
        a[i] = -i;
        b[i] = i * i;
    }

    addMatrix(a, b, c);

    for (int i = 0; i < N; i++) {
        for (int j = 0; j < M; j++) {
            int idx = i * N + j;
            cout << c[idx] << '\t';
        }
        cout << endl;
    }

    return 0;
}
b9d6bdcd346db71f0b8b39a6a6fee5c9fa3c9d2a.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> using namespace std; #define N 10 // rows #define M 10 // columns #define Num_threads_x 2 #define Num_threads_y 4 #define Num_elements 100 __global__ void addMatrixCUDA(const int *a, const int *b, int *c) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int i = y * N + x; if (i < Num_elements) c[i] = a[i] + b[i]; } void addMatrix(int *a, int *b, int *c) { int *dev_a; int *dev_b; int *dev_c; dim3 blocks(Num_elements/Num_threads_x, Num_elements/Num_threads_y); dim3 threads(Num_threads_x, Num_threads_y); cudaMalloc((void**)&dev_c, Num_elements * sizeof(int)); cudaMalloc((void**)&dev_a, Num_elements * sizeof(int)); cudaMalloc((void**)&dev_b, Num_elements * sizeof(int)); cudaMemcpy(dev_a, a, Num_elements * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, Num_elements * sizeof(int), cudaMemcpyHostToDevice); addMatrixCUDA << <blocks, threads >> > (dev_a, dev_b, dev_c); cudaDeviceSynchronize(); cudaMemcpy(c, dev_c, Num_elements * sizeof(int), cudaMemcpyDeviceToHost); } int main() { int a[Num_elements], b[Num_elements], c[Num_elements]; for (int i = 0; i < Num_elements; i++) { a[i] = -i; b[i] = i * i; } addMatrix(a, b, c); for (int i = 0; i < N; i++) { for (int j = 0; j < M; j++) { int idx = i * N + j; cout << c[idx] << '\t'; } cout << endl; } return 0; }
8585c1780ee078b1bc047ada1d00e4ee0cc52317.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #define M 4096 #define N 4096 #define P 4096 #define BLOCKS (M * P - 1)/1024 + 1 __global__ void matrixMultiply(int *out, int *matrixA, int *matrixB) { int threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId < M * P) { int row = threadId % M; int column = threadId % P; int sum = 0; for (int i = 0; i < N; i++) { sum += *(matrixA + i + N *row) * *(matrixB + i * P + column); } *(out + column * M + row) = sum; } } int main() { int *matrixA = (int*)malloc(M * N * sizeof(int)); int *matrixB = (int*)malloc(N * P * sizeof(int)); int *out = (int*)malloc(M * P * sizeof(int)); int *d_matrixA, *d_matrixB, *d_out; int threadCount = 1024; if (BLOCKS == 1) { threadCount = M * P; } for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { *(matrixA + i * N + j) = rand(); } } for (int i = 0; i < N; i++) { for (int j = 0; j < P; j++) { *(matrixB + i * P + j) = rand(); } } //allocate device memory for matrix A hipMalloc((void**)&d_matrixA, M * N * sizeof(int)); //transfer matrix A from host to device memory hipMemcpy(d_matrixA, matrixA, M * N * sizeof(int), hipMemcpyHostToDevice); //allocate device memory for matrix B hipMalloc((void**)&d_matrixB, N * P * sizeof(int)); //transfer matrix B from host to device memory hipMemcpy(d_matrixB, matrixB, N * P * sizeof(int), hipMemcpyHostToDevice); //allocate device memory for output hipMalloc((void**)&d_out, M * P * sizeof(int)); //Threads are N * P, but split into blocks, where appropriate hipLaunchKernelGGL(( matrixMultiply), dim3(BLOCKS), dim3(threadCount), 0, 0, d_out, d_matrixA, d_matrixB); //transfer output from device memory to host memory hipMemcpy(out, d_out, M * P * sizeof(int), hipMemcpyDeviceToHost); //For printing the output, if necessary /* for (int i = 0; i < M; i++) { for (int j = 0; j < P; j++) { printf("%d ", *(out + i + j * M)); } printf("\n"); } */ //free device memory hipFree(d_matrixA); hipFree(d_matrixB); hipFree(d_out); //free host memory free(matrixA); free(matrixB); free(out); return 0; }
8585c1780ee078b1bc047ada1d00e4ee0cc52317.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #define M 4096 #define N 4096 #define P 4096 #define BLOCKS (M * P - 1)/1024 + 1 __global__ void matrixMultiply(int *out, int *matrixA, int *matrixB) { int threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId < M * P) { int row = threadId % M; int column = threadId % P; int sum = 0; for (int i = 0; i < N; i++) { sum += *(matrixA + i + N *row) * *(matrixB + i * P + column); } *(out + column * M + row) = sum; } } int main() { int *matrixA = (int*)malloc(M * N * sizeof(int)); int *matrixB = (int*)malloc(N * P * sizeof(int)); int *out = (int*)malloc(M * P * sizeof(int)); int *d_matrixA, *d_matrixB, *d_out; int threadCount = 1024; if (BLOCKS == 1) { threadCount = M * P; } for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { *(matrixA + i * N + j) = rand(); } } for (int i = 0; i < N; i++) { for (int j = 0; j < P; j++) { *(matrixB + i * P + j) = rand(); } } //allocate device memory for matrix A cudaMalloc((void**)&d_matrixA, M * N * sizeof(int)); //transfer matrix A from host to device memory cudaMemcpy(d_matrixA, matrixA, M * N * sizeof(int), cudaMemcpyHostToDevice); //allocate device memory for matrix B cudaMalloc((void**)&d_matrixB, N * P * sizeof(int)); //transfer matrix B from host to device memory cudaMemcpy(d_matrixB, matrixB, N * P * sizeof(int), cudaMemcpyHostToDevice); //allocate device memory for output cudaMalloc((void**)&d_out, M * P * sizeof(int)); //Threads are N * P, but split into blocks, where appropriate matrixMultiply<<<BLOCKS, threadCount>>>(d_out, d_matrixA, d_matrixB); //transfer output from device memory to host memory cudaMemcpy(out, d_out, M * P * sizeof(int), cudaMemcpyDeviceToHost); //For printing the output, if necessary /* for (int i = 0; i < M; i++) { for (int j = 0; j < P; j++) { printf("%d ", *(out + i + j * M)); } printf("\n"); } */ //free device memory cudaFree(d_matrixA); cudaFree(d_matrixB); cudaFree(d_out); //free host memory free(matrixA); free(matrixB); free(out); return 0; }
5c7141cf54a9d14f3464f77ab9cbcdcd32942dac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define MATRIX_ROWS 5 #define MATRIX_COLUMNS 5 #define SHARED_MEMORY_PADDING 1 __global__ void createMatrixStatic(float* out) { __shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS]; int idx = blockIdx.y * blockDim.x + threadIdx.x; int idy = blockIdx.x * blockDim.y + threadIdx.y; if (idx < MATRIX_COLUMNS && idy < MATRIX_ROWS) { matrix[idy][idx] = idx + idy; out[idy*MATRIX_COLUMNS+idx] = matrix[idy][idx]; } } __global__ void createMatrixStaticPadding(float* out) { __shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS+SHARED_MEMORY_PADDING]; int idx = threadIdx.y * blockDim.x + threadIdx.x; matrix[threadIdx.y][threadIdx.x] = idx; __syncthreads(); out[idx] = matrix[threadIdx.y][threadIdx.x]; } void printMatrix(float* matrix) { for (int y = 0; y < MATRIX_COLUMNS; y++) { for (int x = 0; x < MATRIX_ROWS; x++) { printf("%-3d ", int(matrix[y*MATRIX_ROWS+x])); } printf("\n"); } } int main(void) { printf("\n"); dim3 block(MATRIX_ROWS, MATRIX_COLUMNS); dim3 grid((MATRIX_ROWS+block.x-1)/block.x, (MATRIX_COLUMNS+block.y-1)/block.y); float* host_matrix = (float*)malloc(MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float)); float* device_matrix; hipMalloc((float**)&device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float)); hipLaunchKernelGGL(( createMatrixStatic), dim3(grid),dim3(block), 0, 0, device_matrix); hipDeviceSynchronize(); hipMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), hipMemcpyDeviceToHost); printf("createMatrixStatic\n"); printMatrix(host_matrix); printf("\n"); hipLaunchKernelGGL(( createMatrixStaticPadding), dim3(grid.x),dim3(block.x), 0, 0, device_matrix); hipDeviceSynchronize(); hipMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), hipMemcpyDeviceToHost); printf("createMatrixStaticPadding\n"); printMatrix(host_matrix); printf("\n"); free(host_matrix); hipFree(device_matrix); hipDeviceReset(); printf("\n"); return 0; }
5c7141cf54a9d14f3464f77ab9cbcdcd32942dac.cu
#include <stdio.h> #define MATRIX_ROWS 5 #define MATRIX_COLUMNS 5 #define SHARED_MEMORY_PADDING 1 __global__ void createMatrixStatic(float* out) { __shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS]; int idx = blockIdx.y * blockDim.x + threadIdx.x; int idy = blockIdx.x * blockDim.y + threadIdx.y; if (idx < MATRIX_COLUMNS && idy < MATRIX_ROWS) { matrix[idy][idx] = idx + idy; out[idy*MATRIX_COLUMNS+idx] = matrix[idy][idx]; } } __global__ void createMatrixStaticPadding(float* out) { __shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS+SHARED_MEMORY_PADDING]; int idx = threadIdx.y * blockDim.x + threadIdx.x; matrix[threadIdx.y][threadIdx.x] = idx; __syncthreads(); out[idx] = matrix[threadIdx.y][threadIdx.x]; } void printMatrix(float* matrix) { for (int y = 0; y < MATRIX_COLUMNS; y++) { for (int x = 0; x < MATRIX_ROWS; x++) { printf("%-3d ", int(matrix[y*MATRIX_ROWS+x])); } printf("\n"); } } int main(void) { printf("\n"); dim3 block(MATRIX_ROWS, MATRIX_COLUMNS); dim3 grid((MATRIX_ROWS+block.x-1)/block.x, (MATRIX_COLUMNS+block.y-1)/block.y); float* host_matrix = (float*)malloc(MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float)); float* device_matrix; cudaMalloc((float**)&device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float)); createMatrixStatic<<<grid,block>>>(device_matrix); cudaDeviceSynchronize(); cudaMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), cudaMemcpyDeviceToHost); printf("createMatrixStatic\n"); printMatrix(host_matrix); printf("\n"); createMatrixStaticPadding<<<grid.x,block.x>>>(device_matrix); cudaDeviceSynchronize(); cudaMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), cudaMemcpyDeviceToHost); printf("createMatrixStaticPadding\n"); printMatrix(host_matrix); printf("\n"); free(host_matrix); cudaFree(device_matrix); cudaDeviceReset(); printf("\n"); return 0; }
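This pair shows static shared memory with and without a padding column; the +SHARED_MEMORY_PADDING column keeps column-wise accesses of the tile in different shared-memory banks. Note that createMatrixStaticPadding is launched with <<<grid.x, block.x>>>, i.e. a 1-D block of five threads, so only the first row of the 5x5 output is rewritten and the remaining rows still hold the previous kernel's values when printed. A minimal sketch of the same padding idiom in a case where it matters, a tiled transpose; the kernel name transposeTile and the 32x32 tile are illustrative:

#define TILE 32
__global__ void transposeTile(float *out, const float *in, int width, int height) {
    __shared__ float tile[TILE][TILE + 1];                  // +1 column so reads down a column hit distinct banks
    int x = blockIdx.x * TILE + threadIdx.x;                // input column
    int y = blockIdx.y * TILE + threadIdx.y;                // input row
    if (x < width && y < height)
        tile[threadIdx.y][threadIdx.x] = in[y * width + x];
    __syncthreads();
    x = blockIdx.y * TILE + threadIdx.x;                    // output column (ranges over input rows)
    y = blockIdx.x * TILE + threadIdx.y;                    // output row (ranges over input columns)
    if (x < height && y < width)
        out[y * height + x] = tile[threadIdx.x][threadIdx.y];
}
// launch sketch: dim3 block(TILE, TILE); dim3 grid((width + TILE - 1) / TILE, (height + TILE - 1) / TILE);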
0e92b4db5429582c8457c781d0d5483e6ad24ccb.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "cutlass/wmma_matrix.h" #if defined(CUTLASS_USE_WMMA_API) #include "cutlass_unit_test.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/wmma_gemm_traits.h" #include "tools/test/unit/gemm/gemm_testbed.h" #include "tools/test/unit/gemm/run_gemm.h" //////////////////////////////////////////////////////////////////////////////////////////////////// // // FP16 accumulation // //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32_f16, wmma_gemm_16x16x16_nn) { typedef cutlass::gemm::WmmaGemmTraits< cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 16, 16>, half, half, half, cutlass::gemm::LinearScaling<half>, half > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 16); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32_f16, wmma_gemm_16x16x32_nn) { typedef cutlass::gemm::WmmaGemmTraits< cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 16, 16>, half, half, half, cutlass::gemm::LinearScaling<half>, half > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 32); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_128x128x32_f16, wmma_16x16x16_gemm_256x256x128_nn) { typedef cutlass::gemm::WmmaGemmTraits< cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 128, 128>, half, half, half, cutlass::gemm::LinearScaling<half>, half > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } //////////////////////////////////////////////////////////////////////////////////////////////////// // // FP32 accumulation // 
//////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x16_nt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 16); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x32_nt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 32); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_128x128x32, wmma_16x16x16_gemm_256x256x128_nt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 128, 128> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_8x32x16_gemm_256x256x128_nt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 32, 8> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_32x8x16_gemm_256x256x128_nt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 8, 32> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x16_nn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 16); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x32_nn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 32); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_128x128x32, wmma_16x16x16_gemm_256x256x128_nn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 128, 128> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_8x32x16_gemm_256x256x128_nn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 
cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 32, 8> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_32x8x16_gemm_256x256x128_nn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 8, 32> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x16_tt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 16); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x32_tt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 32); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_128x128x32, wmma_16x16x16_gemm_256x256x128_tt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 128, 128> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_8x32x16_gemm_256x256x128_tt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 32, 8> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_32x8x16_gemm_256x256x128_tt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 8, 32> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x16_tn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 16); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x32_tn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; 
run_gemm<WmmaGemmTraits>(16, 16, 32); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_128x128x32, wmma_16x16x16_gemm_256x256x128_tn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 128, 128> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_8x32x16_gemm_256x256x128_tn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 32, 8> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_32x8x16_gemm_256x256x128_tn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 8, 32> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #endif // defined CUTLASS_USE_WMMA_API
0e92b4db5429582c8457c781d0d5483e6ad24ccb.cu
/*************************************************************************************************** * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "cutlass/wmma_matrix.h" #if defined(CUTLASS_USE_WMMA_API) #include "cutlass_unit_test.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/wmma_gemm_traits.h" #include "tools/test/unit/gemm/gemm_testbed.h" #include "tools/test/unit/gemm/run_gemm.h" //////////////////////////////////////////////////////////////////////////////////////////////////// // // FP16 accumulation // //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32_f16, wmma_gemm_16x16x16_nn) { typedef cutlass::gemm::WmmaGemmTraits< cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 16, 16>, half, half, half, cutlass::gemm::LinearScaling<half>, half > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 16); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32_f16, wmma_gemm_16x16x32_nn) { typedef cutlass::gemm::WmmaGemmTraits< cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 16, 16>, half, half, half, cutlass::gemm::LinearScaling<half>, half > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 32); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_128x128x32_f16, wmma_16x16x16_gemm_256x256x128_nn) { typedef cutlass::gemm::WmmaGemmTraits< cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 128, 128>, half, half, half, cutlass::gemm::LinearScaling<half>, half > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } //////////////////////////////////////////////////////////////////////////////////////////////////// // // FP32 accumulation // 
//////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x16_nt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 16); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x32_nt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 32); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_128x128x32, wmma_16x16x16_gemm_256x256x128_nt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 128, 128> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUDA_VERSION) && CUDA_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_8x32x16_gemm_256x256x128_nt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 32, 8> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUDA_VERSION) && CUDA_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_32x8x16_gemm_256x256x128_nt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 8, 32> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x16_nn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 16); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x32_nn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 32); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_128x128x32, wmma_16x16x16_gemm_256x256x128_nn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 128, 128> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUDA_VERSION) && CUDA_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_8x32x16_gemm_256x256x128_nn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 128, 128>, half, half, 
float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 32, 8> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUDA_VERSION) && CUDA_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_32x8x16_gemm_256x256x128_nn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 8, 32> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x16_tt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 16); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x32_tt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 32); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_128x128x32, wmma_16x16x16_gemm_256x256x128_tt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 128, 128> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUDA_VERSION) && CUDA_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_8x32x16_gemm_256x256x128_tt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 32, 8> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUDA_VERSION) && CUDA_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_32x8x16_gemm_256x256x128_tt) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kRowMajor, cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 8, 32> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x16_tn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 16); } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_16x16x32, wmma_gemm_16x16x32_tn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 16, 16> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(16, 16, 32); } 
//////////////////////////////////////////////////////////////////////////////////////////////////// TEST(WmmaGemm_128x128x32, wmma_16x16x16_gemm_256x256x128_tn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 128, 128> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUDA_VERSION) && CUDA_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_8x32x16_gemm_256x256x128_tn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 32, 8> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUDA_VERSION) && CUDA_VERSION >= 9100 TEST(WmmaGemm_128x128x32, wmma_32x8x16_gemm_256x256x128_tn) { typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<32, 128, 128>, half, half, float, cutlass::gemm::LinearScaling<float>, float, cutlass::Shape<32, 64, 64>, cutlass::Shape<16, 8, 32> > WmmaGemmTraits; run_gemm<WmmaGemmTraits>(256, 256, 128); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #endif // defined CUTLASS_USE_WMMA_API
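The tests in this pair instantiate WmmaGemmTraits for every layout combination and, behind the CUDA_VERSION >= 9100 guards (hipified to TORCH_HIP_VERSION), for the 8x32x16 and 32x8x16 fragment shapes as well. A minimal single-warp sketch of the underlying nvcuda::wmma API that these traits ultimately drive, assuming one 16x16x16 column-major tile with FP32 accumulation on an sm_70+ device; the kernel name wmmaTile16 is illustrative:

#include <mma.h>
#include <cuda_fp16.h>
using namespace nvcuda;
__global__ void wmmaTile16(const half *a, const half *b, float *c) {
    wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::col_major> aFrag;
    wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> bFrag;
    wmma::fragment<wmma::accumulator, 16, 16, 16, float> cFrag;
    wmma::fill_fragment(cFrag, 0.0f);                       // C := 0
    wmma::load_matrix_sync(aFrag, a, 16);                   // leading dimension 16: one dense tile
    wmma::load_matrix_sync(bFrag, b, 16);
    wmma::mma_sync(cFrag, aFrag, bFrag, cFrag);             // C += A * B on the tensor cores
    wmma::store_matrix_sync(c, cFrag, 16, wmma::mem_col_major);
}
// launch sketch: wmmaTile16<<<1, 32>>>(dA, dB, dC);        // the whole warp cooperates on the tile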
a6b7bcd1d9db9dc316ac8258e49ab36cf5a68a1d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "pw_copy_rc_cu_z.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const double *din = NULL; hipMalloc(&din, XSIZE*YSIZE); double *zout = NULL; hipMalloc(&zout, XSIZE*YSIZE); const int n = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( pw_copy_rc_cu_z), dim3(gridBlock),dim3(threadBlock), 0, 0, din,zout,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( pw_copy_rc_cu_z), dim3(gridBlock),dim3(threadBlock), 0, 0, din,zout,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( pw_copy_rc_cu_z), dim3(gridBlock),dim3(threadBlock), 0, 0, din,zout,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a6b7bcd1d9db9dc316ac8258e49ab36cf5a68a1d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "pw_copy_rc_cu_z.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const double *din = NULL; cudaMalloc(&din, XSIZE*YSIZE); double *zout = NULL; cudaMalloc(&zout, XSIZE*YSIZE); const int n = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); pw_copy_rc_cu_z<<<gridBlock,threadBlock>>>(din,zout,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { pw_copy_rc_cu_z<<<gridBlock,threadBlock>>>(din,zout,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { pw_copy_rc_cu_z<<<gridBlock,threadBlock>>>(din,zout,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
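The timed loop in this pair issues 1000 asynchronous launches and reads steady_clock without a trailing cudaDeviceSynchronize, so it largely measures launch overhead rather than kernel execution time; it also sizes the buffers as XSIZE*YSIZE bytes rather than XSIZE*YSIZE*sizeof(double). A minimal sketch of the same measurement with CUDA events, reusing the harness's gridBlock, threadBlock, din, zout and n:

const int iters = 1000;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);                                     // recorded in the same stream as the launches
for (int i = 0; i < iters; ++i)
    pw_copy_rc_cu_z<<<gridBlock, threadBlock>>>(din, zout, n);
cudaEventRecord(stop);
cudaEventSynchronize(stop);                                 // wait until the last launch has finished
float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);                     // GPU-side elapsed time in milliseconds
printf("avg per launch: %.3f us\n", ms * 1000.0f / iters);
cudaEventDestroy(start);
cudaEventDestroy(stop);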
4f2a68ee137702e1c0420f308ba92c79b6f2f269.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * Copyright (c) 2019 Konduit K.K. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/convolutions.h> #include <ops/declarable/helpers/im2col.h> #include <ops/declarable/helpers/col2im.h> #include<ops/declarable/helpers/addBias.h> #include <exceptions/cuda_exception.h> #include <NDArrayFactory.h> #include <MmulHelper.h> #include <PointersManager.h> #include <templatemath.h> namespace nd4j { namespace ops { ////////////////////////////////////////////////////////////////////////// // vol [bS, iC, iD, iH, iW] is convoluted to col [bS, iC, kD, kH, kW, oD, oH, oW] template <typename T> static __global__ void vol2colCuda(const void* volume, const Nd4jLong* volShapeInfo, void* columns, const Nd4jLong* colShapeInfo, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) { const T* vol = reinterpret_cast<const T*>(volume); T* col = reinterpret_cast<T*>(columns); __shared__ int colRank, volRank; __shared__ Nd4jLong colLen, iD, iH, iW, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); volRank = 5; colRank = 8; colLen = shape::length(colShapeInfo); iD = volShapeInfo[3]; iH = volShapeInfo[4]; iW = volShapeInfo[5]; } __syncthreads(); const auto colInd = threadIdx.x + blockIdx.x * blockDim.x; if(colInd >= colLen) return; auto coords = sharedMem + threadIdx.x * colRank; shape::index2coords(colInd, colShapeInfo, coords); // const auto colW = coords[7]; // const auto colH = coords[6]; // const auto colD = coords[5]; // const auto kCol = coords[4]; // const auto kRow = coords[3]; // const auto kDep = coords[2]; // const auto c = coords[1]; // const auto b = coords[0]; const auto colOffset = shape::getOffset(colShapeInfo, coords); coords[2] = -pD + coords[2] * dD + coords[5] * sD; // const auto volDep = (-pD + kDep * dD) + colD * sD; coords[3] = -pH + coords[3] * dH + coords[6] * sH; // const auto volRow = (-pH + kRow * dH) + colH * sH; coords[4] = -pW + coords[4] * dW + coords[7] * sW; // const auto volCol = (-pW + kCol * dW) + colW * sW; if (static_cast<unsigned>(coords[2]) >= static_cast<unsigned>(iD) || static_cast<unsigned>(coords[3]) >= static_cast<unsigned>(iH) || static_cast<unsigned>(coords[4]) >= static_cast<unsigned>(iW)) col[colOffset] = static_cast<T>(0.); else col[colOffset] = vol[shape::getOffset(volShapeInfo, coords)]; } ////////////////////////////////////////////////////////////////////////// template <typename T> static void vol2colCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* volume, const Nd4jLong* volShapeInfo, 
void* columns, const Nd4jLong* colShapeInfo, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) { hipLaunchKernelGGL(( vol2colCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, volume, volShapeInfo, columns, colShapeInfo, sD, sH, sW, pD, pH, pW, dD, dH, dW); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::vol2col(nd4j::graph::Context& block, const NDArray& vol, NDArray& col, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) { PointersManager manager(block.launchContext(), "vol2col"); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (col.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = col.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&col}, {&vol}); BUILD_SINGLE_SELECTOR(vol.dataType(), vol2colCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), vol.getSpecialBuffer(), vol.getSpecialShapeInfo(), col.specialBuffer(), col.specialShapeInfo(), sD, sH, sW, pD, pH, pW, dD, dH, dW), FLOAT_TYPES); NDArray::registerSpecialUse({&col}, {&vol}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// // columns [bS, iC, kD, kH, kW, oD, oH, oW] to be de-convoluted to volume [bS, iC, iD, iH, iW] template <typename T> static __global__ void col2volCuda(const void* columns, const Nd4jLong* colShapeInfo, void* volume, const Nd4jLong* volShapeInfo, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) { const T* col = reinterpret_cast<const T*>(columns); T* vol = reinterpret_cast<T*>(volume); __shared__ uint kD, kH, kW, oD, oH, oW, *sharedMem; __shared__ Nd4jLong volLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<uint*>(shmem); oD = colShapeInfo[6]; oH = colShapeInfo[7]; oW = colShapeInfo[8]; kD = dD * (colShapeInfo[3] - 1) + 1; kH = dH * (colShapeInfo[4] - 1) + 1; kW = dW * (colShapeInfo[5] - 1) + 1; volLen = shape::length(volShapeInfo); } __syncthreads(); auto coords = sharedMem + threadIdx.x * 8; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < volLen; i += gridDim.x * blockDim.x) { shape::index2coords(i, volShapeInfo, coords); const auto volOffset = shape::getOffset(volShapeInfo, coords); const auto bSiCoffset = coords[0] * colShapeInfo[9] + coords[1] * colShapeInfo[10]; const uint imD = coords[2] + pD; const uint imH = coords[3] + pH; const uint imW = coords[4] + pW; const uint colDstart = (imD < kD) ? 0 : (imD - kD) / sD + 1; const uint colHstart = (imH < kH) ? 0 : (imH - kH) / sH + 1; const uint colWstart = (imW < kW) ? 
0 : (imW - kW) / sW + 1; const uint colDend = nd4j::math::nd4j_min<uint>(imD / sD + 1, oD); const uint colHend = nd4j::math::nd4j_min<uint>(imH / sH + 1, oH); const uint colWend = nd4j::math::nd4j_min<uint>(imW / sW + 1, oW); T val = 0; for(uint colD = colDstart; colD < colDend; ++colD) { coords[2] = imD - colD * sD; if(coords[2] % dD != 0) continue; for(uint colH = colHstart; colH < colHend; ++colH) { coords[3] = imH - colH * sH; if(coords[3] % dH != 0) continue; for(uint colW = colWstart; colW < colWend; ++colW) { coords[4] = imW - colW * sW; if(coords[4] % dW != 0) continue; val += col[bSiCoffset + (coords[2]/dD)*colShapeInfo[11] + (coords[3]/dH)*colShapeInfo[12] + (coords[4]/dW)*colShapeInfo[13] + colD*colShapeInfo[14] + colH*colShapeInfo[15] + colW*colShapeInfo[16]]; } } } vol[volOffset] = val; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void col2volCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* columns, const Nd4jLong* colShapeInfo, void* volume, const Nd4jLong* volShapeInfo, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) { hipLaunchKernelGGL(( col2volCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, columns, colShapeInfo, volume, volShapeInfo, sD, sH, sW, pD, pH, pW, dD, dH, dW); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::col2vol(nd4j::graph::Context& block, const NDArray& col, NDArray& vol, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) { PointersManager manager(block.launchContext(), "col2vol"); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (vol.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = col.rankOf() * sizeof(uint) * threadsPerBlock + 256; NDArray::prepareSpecialUse({&vol}, {&col}); BUILD_SINGLE_SELECTOR(vol.dataType(), col2volCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), col.getSpecialBuffer(), col.getSpecialShapeInfo(), vol.specialBuffer(), vol.specialShapeInfo(), sD, sH, sW, pD, pH, pW, dD, dH, dW), FLOAT_TYPES); NDArray::registerSpecialUse({&vol}, {&col}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> static void conv2d_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { // input [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) // weights [kH, kW, iC, oC] always // bias [oC] // output [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW) // kH filter(kernel) height // kW filter(kernel) width // sH strides height // sW strides width // pH paddings height // pW paddings width // dH dilations height // dW dilations width // paddingMode 0-VALID, 1-SAME // isNCHW 1-NCHW, 0-NHWC int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, 
indOoH); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<int> permutForOutput; if(isNCHW) permutForOutput = {0, 3, 1, 2}; // [bS, oH, oW, oC] -> [bS, oC, oH, oW] else input = new NDArray(input->permute({0, 3, 1, 2})); // [bS, iH, iW, iC] -> [bS, iC, iH, iW] if NHWC NDArray col('c', {bS, oH, oW, kH, kW, iC}, input->dataType(), input->getContext()); NDArray colP = col.permute({0, 5, 3, 4, 1, 2}); // {bS, iC, kH, kW, oH, oW} NDArray mmulResult('f', {bS*oH*oW, oC}, output->dataType(), output->getContext()); //----- calculation of output -----// auto ctx = block.launchContext(); helpers::im2col(*ctx, *input, colP, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext())); // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW] MmulHelper::tensorDot(&col, weights, &mmulResult, {3,4,5}, {0,1,2}, {}); // [bS, oH, oW, kH, kW, iC] x [kH, kW, iC, oC] = [bS, oH, oW, oC] //----- assign outTemp to output -----// if(isNCHW) { mmulResult.reshapei({bS, oH, oW, oC}); mmulResult.permutei(permutForOutput); } output->assign(mmulResult); //----- add biases if required -----// if(bias) // output->applyBroadcast(broadcast::Add, {indIOioC}, bias); helpers::addBias(block, *output, *bias, *output, isNCHW); if(!isNCHW) delete input; } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::conv2d(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), conv2d_, (block, input, weights, bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> static void depthwiseConv2d_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { // input [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) // weights [kH, kW, iC, mC] always // bias [oC] = iC*mC // output [bS, oH, oW, iC*mC] (NHWC) or [bS, iC*mC, oH, oW] (NCHW) // kH filter(kernel) height // kW filter(kernel) width // sH strides height // sW strides width // pH paddings height // pW paddings width // dH dilations height // dW dilations width // paddingMode 0-VALID, 1-SAME // isNCHW 0-NCHW, 1-NHWC int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, channels multiplier(oC = iC*mC), output channels, output height/width int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH); mC = weights->sizeAt(indWmC); // channels multiplier std::vector<std::vector<Nd4jLong>> modifColumns = {{1,0,4,5,2,3}, {iC,bS*oH*oW,kH*kW}}; // [bS,iC,kH,kW,oH,oW] -> [iC,bS,oH,oW,kH,kW] -> [iC,bS*oH*oW,kH*kW] std::vector<std::vector<Nd4jLong>> modifOutput; std::vector<Nd4jLong> outReShape; if(!isNCHW) { outReShape = {bS, oH, oW, iC, mC}; // [bS,oH,oW,iC*mC] -> [bS,oH,oW,iC,mC] modifOutput = {{3,0,1,2,4},{iC, bS*oH*oW, mC}}; // [bS,oH,oW,iC,mC] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC] input = new 
NDArray(input->permute({0, 3, 1, 2})); // [bS,iH,iW,iC] -> [bS,iC,iH,iW] } else { outReShape = {bS, iC, mC, oH, oW}; // [bS,iC*mC,oH,oW] -> [bS,iC,mC,oH,oW] modifOutput = {{1,0,3,4,2},{iC, bS*oH*oW, mC}}; // [bS,iC,mC,oH,oW] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC] } if(paddingMode == 1) // SAME ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW); NDArray columns(input->ordering(), {bS, iC, kH, kW, oH, oW}, input->dataType(), input->getContext()); NDArray outputReshaped = output->reshape(output->ordering(), outReShape, false); helpers::im2col(*output->getContext(), *input, columns, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext())); // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW] MmulHelper::tensorDot(&columns, weights, &outputReshaped, modifColumns, {{2,0,1,3},{iC,kH*kW,mC}}, modifOutput); // [iC, bS*oH*oW, kW*kH] x [iC, kH*kW, mC] = [iC, bS*oH*oW, mC] if(bias) // output->applyBroadcast(broadcast::Add, {indIOioC}, bias); helpers::addBias(block, *output, *bias, *output, isNCHW); if(!isNCHW) delete input; } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::depthwiseConv2d(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), depthwiseConv2d_, (block, input, weights, bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> static void sconv2d_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weightsDepth, const NDArray* weightsPoint, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { // input [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) // weightsDepth [kH, kW, iC, mC] always // weightsPoint [1, 1, iC*mC, oC] always // bias [oC], oC = iC*mC if weightsPoint=nullptr // output is [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW) // kH filter(kernel) height // kW filter(kernel) width // sH strides height // sW strides width // pH paddings height // pW paddings width // dH dilations height // dW dilations width // paddingMode 0-VALID, 1-SAME // isNCHW 1-NCHW, 0-NHWC int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, channels multiplier, output channels, output height/width int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH); mC = weightsDepth->sizeAt(indWmC); // channels multiplier NDArray* outputDepth = output; if(weightsPoint) // if pointwise convolution is expected outputDepth = new NDArray(output->ordering(), !isNCHW ? std::vector<Nd4jLong>({bS, oH, oW, iC*mC}) : std::vector<Nd4jLong>({bS, iC*mC, oH, oW}), input->dataType(), input->getContext()); // ----- perform depthwise convolution (if weightsPoint is absent then oC = iC*mC) ----- // ConvolutionUtils::depthwiseConv2d(block, input, weightsDepth, weightsPoint ? 
nullptr : bias, outputDepth, kH,kW, sH,sW, pH,pW, dH,dW, paddingMode, isNCHW); // ----- perform pointwise convolution (oH = iH, oW = iW) ----- // if (weightsPoint) { ConvolutionUtils::conv2d(block, outputDepth, weightsPoint, bias, output, 1,1, 1,1, 0,0, 1,1, paddingMode, isNCHW); // in this case oH=iH, oW=iW delete outputDepth; } } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::sconv2d(nd4j::graph::Context& block, const NDArray* input, const NDArray* weightsDepth, const NDArray* weightsPoint, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), sconv2d_, (block, input, weightsDepth, weightsPoint, bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static __global__ void avgPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if(hstart < 0){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) -hstart / (Z)dH); hstart += f * dH; } if(wstart < 0){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) -wstart / (Z) dW); wstart += f * dW; } if(hend > iH){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) (hend-iH) / (Z) dH); hend -= f * dH; } if(wend > iW){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) (wend-iW) / (Z) dW); wend -= f * dW; } //Accounts for dilation int pool_size = nd4j::math::nd4j_ceil<double,int>((double) (hend-hstart) / (double) dH) * nd4j::math::nd4j_ceil<double,int>((double) (wend-wstart) / (double) dW); Z sum = 0.0f; const X *inSlice = x + (n * strideB + c * strideC); for (int h = hstart; h < hend; h += dH) for (int w = wstart; w < wend; w += dW) sum += static_cast<Z>(inSlice[h * strideY + w * strideX]); int divide_factor = pool_size; //Case 0: exclude padding if (extraParam0 == 1) //Case 1: include padding 
divide_factor = kH * kW; z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = sum / static_cast<Z>(divide_factor); } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static void avgPooling2dCudaLauncher(nd4j::LaunchContext & block, void *vx, Nd4jLong *vxShapeInfo, void *vz, Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { hipLaunchKernelGGL(( avgPooling2dCuda<X, Z>), dim3(512), dim3(512), 4192, *block.getCudaStream(), vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static __global__ void pnormPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; __shared__ bool fOrder; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if (hstart < 0) { int f = nd4j::math::nd4j_ceil<Z, int>((Z) -hstart / (Z) dH); hstart += f * dH; } if (wstart < 0) { int f = nd4j::math::nd4j_ceil<Z, int>((Z) -wstart / (Z) dW); wstart += f * dW; } if (hend > iH) { int f = nd4j::math::nd4j_ceil<Z, int>((Z) (hend - iH) / (Z) dH); hend -= f * dH; } if (wend > iW) { int f = nd4j::math::nd4j_ceil<Z, int>((Z) (wend - iW) / (Z) dW); wend -= f * dW; } //Accounts for dilation int pool_size = nd4j::math::nd4j_ceil<double, int>((double) (hend - hstart) / (double) dH) * nd4j::math::nd4j_ceil<double, int>((double) (wend - wstart) / (double) dW); Z sum = 0.f; const X *inSlice = x + (n * strideB + c * strideC); for (int h = hstart; h < hend; h += dH) for (int w = wstart; w < wend; w += dW) sum += nd4j::math::nd4j_pow<Z, Z, Z>(static_cast<Z>(nd4j::math::nd4j_abs<X>(inSlice[h * strideY + w * strideX])), extraParam0); z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = nd4j::math::nd4j_pow<Z, Z, Z>(sum, (Z) 1.0f / extraParam0); } } ////////////////////////////////////////////////////////////////////////// template <typename 
X, typename Z> static void pnormPooling2dCudaLauncher(nd4j::LaunchContext & block, void *vx, Nd4jLong *vxShapeInfo, void *vz, Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { hipLaunchKernelGGL(( pnormPooling2dCuda<X, Z>), dim3(512), dim3(512), 4192, *block.getCudaStream(), vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static __global__ void maxPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; __shared__ bool fOrder; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if(hstart < 0){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) -hstart / (Z)dH); hstart += f * dH; } if(wstart < 0){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) -wstart / (Z) dW); wstart += f * dW; } if(hend > iH){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) (hend-iH) / (Z) dH); hend -= f * dH; } if(wend > iW){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) (wend-iW) / (Z) dW); wend -= f * dW; } //Accounts for dilation int pool_size = nd4j::math::nd4j_ceil<double,int>((double) (hend-hstart) / (double) dH) * nd4j::math::nd4j_ceil<double,int>((double) (wend-wstart) / (double) dW); Z max = -nd4j::DataTypeUtils::max<Z>(); const X *inSlice = x + (n * strideB + c * strideC); for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { Z v = static_cast<Z>(inSlice[h * strideY + w * strideX]); if (v > max) max = v; } } z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = max; } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static void maxPooling2dCudaLauncher(nd4j::LaunchContext & block, void *vx, Nd4jLong *vxShapeInfo, void *vz, Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { hipLaunchKernelGGL(( 
maxPooling2dCuda<X,Z>), dim3(512), dim3(512), 4192, *block.getCudaStream(), vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::pooling2d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const PoolingType poolingMode, const int extraParam0) { if(!input.isActualOnDeviceSide()) input.syncToDevice(); switch (poolingMode) { case MAX_POOL: { BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), maxPooling2dCudaLauncher, (*block.launchContext(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), FLOAT_TYPES); } break; case AVG_POOL: { BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), avgPooling2dCudaLauncher, (*block.launchContext(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), FLOAT_TYPES); } break; case PNORM_POOL: { BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), pnormPooling2dCudaLauncher, (*block.launchContext(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), FLOAT_TYPES); } break; default: throw std::runtime_error("Pooling2D: Unknown PoolingType used"); } output.tickWriteDevice(); input.tickReadDevice(); auto result = hipStreamSynchronize(*block.launchContext()->getCudaStream()); if (result != 0) throw cuda_exception::build("Pooling2D failed", result); } ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void pooling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { // x input is [bS, iC, iD, iH, iW] // z output is [bS, iC, oD, oH, oW] const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd; __shared__ Nd4jLong *sharedMem, zLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); zLen = shape::length(zShapeInfo); rank = 5; kDeff = kD + (kD - 1) * (dD - 1); kHeff = kH + (kH - 1) * (dH - 1); kWeff = kW + (kW - 1) * (dW - 1); iD = xShapeInfo[3]; iH = xShapeInfo[4]; iW = xShapeInfo[5]; kProd = kD * kH * kW; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); int dstart = coords[2] * sD - pD; int hstart = coords[3] * sH - pH; int wstart = coords[4] * sW - pW; int dend = dstart + kDeff; int hend = hstart + kHeff; int wend = wstart + kWeff; if(dstart < 0) dstart += dD * ((-dstart + dD - 1) / dD); if(hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH); if(wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW); if(dend > iD) dend -= dD * ((dend - iD + dD - 1) / dD); if(hend > iH) hend -= dH * ((hend - iH + dH - 1) / dH); if(wend > iW) wend -= dW * ((wend - iW + dW - 1) / dW); switch 
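// poolingMode dispatch (as implemented by the cases below): 0 -> max, 1 -> average
// (extraParam0 == 0 excludes padding from the divisor, == 1 divides by the full kD*kH*kW window),
// 2 -> p-norm with p = extraParam0.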
(poolingMode) { /*** max ***/ case 0: { T max = -DataTypeUtils::max<T>(); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) { for (coords[3] = hstart; coords[3] < hend; coords[3] += dH){ for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) { T val = x[shape::getOffset(xShapeInfo, coords)]; if (val > max) max = val; } } } z[zOffset] = max; } break; /*** avg ***/ case 1: { T sum = static_cast<T>(0.); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += x[shape::getOffset(xShapeInfo, coords)]; if (extraParam0 == 0) { //Exclude padding uint a = (dend - dstart) / dD + ((dend - dstart) % dD == 0 ? 0 : 1); uint b = (hend - hstart) / dH + ((hend - hstart) % dH == 0 ? 0 : 1); uint c = (wend - wstart) / dW + ((wend - wstart) % dW == 0 ? 0 : 1); sum /= static_cast<T>(a * b * c); // /= nd4j::math::nd4j_ceil<double,T>(static_cast<double>(dend - dstart) / static_cast<double>(dD)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation } else if (extraParam0 == 1) //Include padding sum /= kProd; z[zOffset] = sum; } break; /*** pnorm ***/ case 2: { T sum = static_cast<T>(0.); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0); sum = nd4j::math::nd4j_pow<T,T,T>(sum, (T) 1.f / extraParam0); z[zOffset] = sum; } break; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void pooling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { hipLaunchKernelGGL(( pooling3dCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::pooling3d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { PointersManager manager(block.launchContext(), "pooling3d"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), pooling3dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, 
extraParam0), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void pooling2dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) { // x: input [bS, iC, iH, iW] // y: gradO [bS, iC, oH, oW] // z: gradI [bS, iC, iH, iW] -> gradI is output in this function const T* x = reinterpret_cast<const T*>(vx); const T* y = reinterpret_cast<const T*>(vy); T* z = reinterpret_cast<T*>(vz); Nd4jLong coord2, coord3; __shared__ int rank, kHeff, kWeff, iH, iW, kProd; __shared__ Nd4jLong *sharedMem, yLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); yLen = shape::length(yShapeInfo); rank = 4; kHeff = kH + (kH - 1) * (dH - 1); kWeff = kW + (kW - 1) * (dW - 1); iH = xShapeInfo[3]; iW = xShapeInfo[4]; kProd = kH * kW; } __syncthreads(); const auto yInd = threadIdx.x + blockIdx.x * blockDim.x; if(yInd >= yLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(yInd, yShapeInfo, coords); const auto yOffset = shape::getOffset(yShapeInfo, coords); int hstart = coords[2] * sH - pH; int wstart = coords[3] * sW - pW; int hend = hstart + kHeff; int wend = wstart + kWeff; if(hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH); if(wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW); if(hend > iH) hend -= dH * ((hend - iH + dH - 1) / dH); if(wend > iW) wend -= dW * ((wend - iW + dW - 1) / dW); switch (poolingMode) { /*** max ***/ case 0: { coord2 = hstart; coord3 = wstart; T max = -DataTypeUtils::max<T>(); for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) { for (coords[3] = wstart; coords[3] < wend; coords[3] += dW){ T val = x[shape::getOffset(xShapeInfo, coords)]; if (val > max) { max = val; coord2 = coords[2]; coord3 = coords[3]; } } } coords[2] = coord2; coords[3] = coord3; auto zOffset = shape::getOffset(zShapeInfo, coords); nd4j::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], y[yOffset]); //z[zOffset] += y[yOffset]; } break; /*** avg ***/ case 1: { T val = y[yOffset]; if (extraParam0 == 0) //Exclude padding val /= nd4j::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation else if (extraParam0 == 1) //Include padding val /= kProd; for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) nd4j::math::atomics::nd4j_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], val); } break; /*** pnorm ***/ case 2: { T sum = static_cast<T>(0.); T val = y[yOffset]; for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) sum += nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0); val *= nd4j::math::nd4j_pow<T,T,T>(sum, ((T)1.f - extraParam0) / extraParam0); for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) { for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) { const auto xOffset = shape::getOffset(xShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); 
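// p-norm backprop term accumulated below: d/dx (sum |x|^p)^(1/p) = |x|^(p-1) * sgn(x) * (sum |x|^p)^((1-p)/p);
// 'val' already carries gradO * (sum |x|^p)^((1-p)/p), so only the per-element factor |x|^(p-1) * sgn(x) is applied here.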
nd4j::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], val * nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[xOffset]), extraParam0 - 1.f) * nd4j::math::nd4j_sgn<T,T>(x[xOffset])); } } } break; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void pooling2dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) { hipLaunchKernelGGL(( pooling2dBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::pooling2dBP(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) { // initial zeroing of gradI gradI.nullify(); PointersManager manager(block.launchContext(), "pooling2dBP"); const int threadsPerBlock = 256; const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = gradO.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&gradI}, {&input, &gradO}); BUILD_SINGLE_SELECTOR(input.dataType(), pooling2dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0), FLOAT_TYPES); NDArray::registerSpecialUse({&gradI}, {&input, &gradO}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void pooling3dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { // x: input [bS, iC, iD, iH, iW] // y: gradO [bS, iC, oD, oH, oW] // z: gradI [bS, iC, iD, iH, iW] -> gradI is output in this function const T* x = reinterpret_cast<const T*>(vx); const T* y = reinterpret_cast<const T*>(vy); T* z = reinterpret_cast<T*>(vz); Nd4jLong coord2, coord3, coord4; __shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd; __shared__ Nd4jLong *sharedMem, yLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); yLen = shape::length(yShapeInfo); rank = 5; kDeff = kD + (kD - 1) * (dD - 1); kHeff = kH + (kH - 1) * (dH - 1); kWeff = kW + (kW - 1) * (dW - 1); iD = xShapeInfo[3]; iH = xShapeInfo[4]; iW = xShapeInfo[5]; kProd = kD * kH * kW; } __syncthreads(); const auto yInd = threadIdx.x + blockIdx.x * blockDim.x; if(yInd >= yLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(yInd, yShapeInfo, coords); const auto yOffset = 
shape::getOffset(yShapeInfo, coords); int dstart = coords[2] * sD - pD; int hstart = coords[3] * sH - pH; int wstart = coords[4] * sW - pW; int dend = dstart + kDeff; int hend = hstart + kHeff; int wend = wstart + kWeff; if(dstart < 0) dstart += dD * ((-dstart + dD - 1) / dD); if(hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH); if(wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW); if(dend > iD) dend -= dD * ((dend - iD + dD - 1) / dD); if(hend > iH) hend -= dH * ((hend - iH + dH - 1) / dH); if(wend > iW) wend -= dW * ((wend - iW + dW - 1) / dW); switch (poolingMode) { /*** max ***/ case 0: { T max = -DataTypeUtils::max<T>(); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) { for (coords[3] = hstart; coords[3] < hend; coords[3] += dH){ for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) { T val = x[shape::getOffset(xShapeInfo, coords)]; if (val > max) { max = val; coord2 = coords[2]; coord3 = coords[3]; coord4 = coords[4]; } } } } coords[2] = coord2; coords[3] = coord3; coords[4] = coord4; nd4j::math::atomics::nd4j_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], y[yOffset]); } break; /*** avg ***/ case 1: { T val = y[yOffset]; if (extraParam0 == 0) //Exclude padding val /= nd4j::math::nd4j_ceil<double,T>(static_cast<double>(dend - dstart) / static_cast<double>(dD)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation else if (extraParam0 == 1) //Include padding val /= kProd; for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) nd4j::math::atomics::nd4j_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], val); } break; /*** pnorm ***/ case 2: { T sum = static_cast<T>(0.); T val = y[yOffset]; for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0); val *= nd4j::math::nd4j_pow<T,T,T>(sum, ((T)1.f - extraParam0) / extraParam0); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) { for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) { for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) { const auto xOffset = shape::getOffset(xShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); nd4j::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], val * nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[xOffset]), extraParam0 - 1.f) * nd4j::math::nd4j_sgn<T,T>(x[xOffset])); } } } } break; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void pooling3dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { hipLaunchKernelGGL(( pooling3dBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, 
kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::pooling3dBP(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { // initial zeroing of gradI gradI.nullify(); PointersManager manager(block.launchContext(), "pooling3dBP"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = gradO.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&gradI}, {&input, &gradO}); BUILD_SINGLE_SELECTOR(input.dataType(), pooling3dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0), FLOAT_TYPES); NDArray::registerSpecialUse({&gradI}, {&input, &gradO}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> static void conv2dBP_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { // input [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) // weights [kH, kW, iC, oC] always // bias [oC] // gradO [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next // gradI [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW), epsilon // gradW [kH, kW, iC, oC] always // gradB [oC] // kH filter(kernel) height // kW filter(kernel) width // sH strides height // sW strides width // pH paddings height // pW paddings width // dH dilations height // dW dilations width // paddingMode 0-VALID, 1-SAME // isNCHW 0-NHWC, 1-NCHW int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<int> gradOaxesForDot; if(!isNCHW) { gradOaxesForDot = {0, 1, 2}; // bS, oH, oW input = new NDArray(input->permute({0, 3, 1, 2})); // [bS, iH, iW, iC] -> [bS, iC, iH, iW] gradI = new NDArray(gradI->permute({0, 3, 1, 2})); // [bS, iH, iW, iC] -> [bS, iC, iH, iW] } else { gradOaxesForDot = {0, 2, 3}; // bS, oH, oW } NDArray columns(input->ordering(), {bS, iC, kH, kW, oH, oW}, input->dataType(), input->getContext()); // ----- calculation of gradW ----- // if(gradW) { auto ctx = block.launchContext(); helpers::im2col(*ctx, *input, columns, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext())); // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW] nd4j::MmulHelper::tensorDot(&columns, gradO, gradW, {0,4,5}, gradOaxesForDot, {2, 0, 1, 
3}); // [bS, iC, kH, kW, oH, oW] x [bS, oH, oW, oC]/[bS, oC, oH, oW] = [iC, kH, kW, oC] } // ----- calculation of gradB ----- // if(gradB) { NDArray* gradBR = gradB; if(gradB->rankOf() == 2) gradBR = new NDArray(gradB->reshape(gradB->ordering(), {(int)gradB->lengthOf()})); gradO->reduceAlongDimension(reduce::Sum, *gradBR, gradOaxesForDot, false); // sum over bS, oH, oW if(gradBR != gradB) delete gradBR; } //----- calculation of gradI -----// nd4j::MmulHelper::tensorDot(weights, gradO, &columns, {indWoC}, {indIOioC}, {2, 3, 1, 0, 4, 5}); // [kH, kW, iC, oC]/[oC, iC, kH, kW]] x [bS, oH, oW, oC]/[bS, oC, oH, oW] = [kH, kW, iC, bS, oH, oW] helpers::col2im(*block.launchContext(), columns, *gradI, sH, sW, pH, pW, iH, iW, dH, dW); // [bS, iC, kH, kW, oH, oW] is de-convoluted to [bS, iC, iH, iW] if(!isNCHW) { delete input; delete gradI; } } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::conv2dBP(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), conv2dBP_, (block, input, weights, bias, gradO, gradI, gradW, gradB, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> static void depthwiseConv2dBP_(const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { // input [bS, iH, iW, iC] (NDHWC) or [bS, iC, iH, iW] (NCDHW) // weights [kH, kW, iC, mC] always // bias [oC] = [iC*mC] // gradO [bS, oH, oW, oC] (NDHWC) or [bS, oC, oH, oW] (NCDHW), epsilon_next // gradI [bS, iH, iW, iC] (NDHWC) or [bS, iC, iH, iW] (NCDHW), epsilon // gradW [kH, kW, iC, mC] always // gradB [oC] // kH filter(kernel) height // kW filter(kernel) width // sH strides height // sW strides width // pH paddings height // pW paddings width // dH dilations height // dW dilations width // paddingMode 0-VALID, 1-SAME // isNCHW 0-NHWC, 1-NCHW int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, channels multiplier(oC = iC*mC), output channels, output height/width int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH); mC = weights->sizeAt(indWmC); // channels multiplier std::vector<std::vector<Nd4jLong>> modifColumns = {{1,2,3,0,4,5}, {iC, kH*kW, bS*oH*oW}}; // [bS,iC,kH,kW,oH,oW] -> [iC, kH*kW, bS*oH*oW] std::vector<std::vector<Nd4jLong>> modifGradO1, modifGradO2; std::vector<Nd4jLong> gradOreShape; if(!isNCHW) { gradOreShape = {bS, oH, oW, iC, mC}; // [bS,oH,oW,iC*mC] -> [bS,oH,oW,iC,mC] modifGradO1 = {{3,0,1,2,4},{iC, bS*oH*oW, mC}}; // [bS,oH,oW,iC,mC] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC] modifGradO2 = {{3,0,1,2},{iC, mC, bS*oH*oW}}; // [bS,oH,oW,iC*mC] -> [iC*mC,bS,oH,oW] -> [iC,mC,bS*oH*oW] input = new NDArray(input->permute({0, 3, 1, 2})); // [bS,iH,iW,iC] -> [bS,iC,iH,iW] gradI = new NDArray(gradI->permute({0, 3, 1, 2})); // [bS,iH,iW,iC] -> 
[bS,iC,iH,iW] } else { gradOreShape = {bS, iC, mC, oH, oW}; // [bS,iC*mC,oH,oW] -> [bS,iC,mC,oH,oW] modifGradO1 = {{1,0,3,4,2},{iC, bS*oH*oW, mC}}; // [bS,iC,mC,oH,oW] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC] modifGradO2 = {{1,0,2,3},{iC, mC, bS*oH*oW}}; // [bS,iC*mC,oH,oW] -> [iC*mC,bS,oH,oW] -> [iC,mC,bS*oH*oW] } if(paddingMode == 1) // SAME ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW); NDArray columns(input->ordering(), {bS, iC, kH, kW, oH, oW}, input->dataType(), input->getContext()); NDArray gradOreshaped = gradO->reshape(gradO->ordering(), gradOreShape); // ----- calculation of gradW and gradB ----- // helpers::im2col(*input->getContext(), *input, columns, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext())); // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW] nd4j::MmulHelper::tensorDot(&columns, &gradOreshaped, gradW, modifColumns, modifGradO1, {{2,0,1,3},{iC,kH*kW,mC}}); // [iC, kW*kH, bS*oH*oW] x [iC, bS*oH*oW, mC] = [iC, kH*kW, mC] // ----- calculation of gradB ----- // if(gradB) { NDArray* gradBR = gradB; if(gradB->rankOf() == 2) gradBR = new NDArray(gradB->reshape(gradB->ordering(), {(int)gradB->lengthOf()})); gradO->reduceAlongDimension(reduce::Sum, *gradBR, {0,indOoH,indOoH+1}, false); // sum over bS, oH, oW if(gradBR != gradB) delete gradBR; } //----- calculation of gradI -----// nd4j::MmulHelper::tensorDot(weights, gradO, &columns, {{2,0,1,3},{iC,kH*kW,mC}}, modifGradO2, modifColumns); // [iC, kH*kW, mC] x [iC, mC, bS*oH*oW] = [iC, kW*kH, bS*oH*oW] helpers::col2im(*input->getContext(), columns, *gradI, sH, sW, pH, pW, iH, iW, dH, dW); // [bS, iC, kH, kW, oH, oW] is de-convoluted to [bS, iC, iH, iW] if(!isNCHW) { delete input; delete gradI; } } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::depthwiseConv2dBP(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), depthwiseConv2dBP_, (input, weights, bias, gradO, gradI, gradW, gradB, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES); } ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void upsampling2dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorH, const int factorW, const bool isNCHW) { // x has shape [bS, iC, iH, iW] (NCHW) or [bS, iH, iW, iC] (NHWC) // z has shape [bS, iC, factorH*iH, factorW*iW ] (NCHW) or [bS, factorH*iH, factorW*iW, iC] (NHWC) const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, dimIH; __shared__ Nd4jLong *sharedMem, zLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); dimIH = isNCHW ? 
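// dimIH is the index of the spatial H axis: 2 for NCHW ([bS, iC, iH, iW]), 1 for NHWC ([bS, iH, iW, iC]);
// each output element simply copies x at (h / factorH, w / factorW) -- nearest-neighbor upsampling.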
2 : 1; zLen = shape::length(zShapeInfo); rank = 4; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); coords[dimIH] /= factorH; coords[dimIH + 1] /= factorW; const auto xOffset = shape::getOffset(xShapeInfo, coords); z[zOffset] = x[xOffset]; } ////////////////////////////////////////////////////////////////////////// template <typename T> static void upsampling2dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorH, const int factorW, const bool isNCHW) { hipLaunchKernelGGL(( upsampling2dCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, factorH, factorW, isNCHW); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::upsampling2d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int factorH, const int factorW, const bool isNCHW) { PointersManager manager(block.launchContext(), "upsampling2d"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), upsampling2dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), factorH, factorW, isNCHW), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void upsampling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorD, const int factorH, const int factorW, const bool isNCDHW) { // x has shape [bS, iC, iD, iH, iW] (NCDHW) or [bS, iD, iH, iW, iC] (NDHWC) // z has shape [bS, iC, factorD*iD, factorH*iH, factorW*iW ] (NCDHW) or [bS, factorD*iD, factorH*iH, factorW*iW, iC] (NDHWC) const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, dimID; __shared__ Nd4jLong *sharedMem, zLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); dimID = isNCDHW ? 
2 : 1; zLen = shape::length(zShapeInfo); rank = 5; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); coords[dimID] /= factorD; coords[dimID + 1] /= factorH; coords[dimID + 2] /= factorW; const auto xOffset = shape::getOffset(xShapeInfo, coords); z[zOffset] = x[xOffset]; } ////////////////////////////////////////////////////////////////////////// template <typename T> static void upsampling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorD, const int factorH, const int factorW, const bool isNCDHW) { hipLaunchKernelGGL(( upsampling3dCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, factorD, factorH, factorW, isNCDHW); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::upsampling3d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int factorD, const int factorH, const int factorW, const bool isNCDHW) { PointersManager manager(block.launchContext(), "upsampling3d"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), upsampling3dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), factorD, factorH, factorW, isNCDHW), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void upsampling2dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCHW) { // x (gradO) has shape [bS, iC, factorH*iH, factorW*iW ] (NCHW) or [bS, factorH*iH, factorW*iW, iC] (NHWC) // z (gradI) has shape [bS, iC, iH, iW] (NCHW) or [bS, iH, iW, iC] (NHWC) const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, dimIH; __shared__ uint factorH, factorW; __shared__ Nd4jLong *sharedMem, zLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); dimIH = isNCHW ? 
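// the upsampling factors are recovered from the shape ratio gradO/gradI along each spatial axis (factorH/factorW below);
// every gradI element then sums the factorH x factorW block of gradO values that were replicated from it in the forward pass.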
2 : 1; zLen = shape::length(zShapeInfo); rank = 4; factorH = xShapeInfo[dimIH + 1] / zShapeInfo[dimIH + 1]; factorW = xShapeInfo[dimIH + 2] / zShapeInfo[dimIH + 2]; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); z[zOffset] = 0; const Nd4jLong zCoord2 = coords[dimIH] * factorH; const Nd4jLong zCoord3 = coords[dimIH + 1] * factorW; for(coords[dimIH] = zCoord2; coords[dimIH] < zCoord2 + factorH; ++coords[dimIH]) for(coords[dimIH + 1] = zCoord3; coords[dimIH + 1] < zCoord3 + factorW; ++coords[dimIH + 1]) z[zOffset] += x[shape::getOffset(xShapeInfo, coords)]; } ////////////////////////////////////////////////////////////////////////// template <typename T> static void upsampling2dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCHW) { hipLaunchKernelGGL(( upsampling2dBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, isNCHW); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::upsampling2dBP(nd4j::graph::Context& block, const NDArray& gradO, NDArray& gradI, const bool isNCHW) { PointersManager manager(block.launchContext(), "upsampling2d_bp"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = gradI.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&gradI}, {&gradO}); BUILD_SINGLE_SELECTOR(gradI.dataType(), upsampling2dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), isNCHW), FLOAT_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void upsampling3dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCDHW) { // x (gradO) has shape [bS, iC, iD, iH, iW] (NCDHW) or [bS, iD, iH, iW, iC] (NDHWC) // z (gradI) has shape [bS, iC, factorD*iD, factorH*iH, factorW*iW ] (NCDHW) or [bS, factorD*iD, factorH*iH, factorW*iW, iC] (NDHWC) const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, dimID; __shared__ uint factorD, factorH, factorW; __shared__ Nd4jLong *sharedMem, zLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); dimID = isNCDHW ? 
2 : 1; zLen = shape::length(zShapeInfo); rank = 5; factorD = xShapeInfo[dimID + 1] / zShapeInfo[dimID + 1]; factorH = xShapeInfo[dimID + 2] / zShapeInfo[dimID + 2]; factorW = xShapeInfo[dimID + 3] / zShapeInfo[dimID + 3]; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); z[zOffset] = 0; const Nd4jLong zCoord2 = coords[dimID] * factorD; const Nd4jLong zCoord3 = coords[dimID + 1] * factorH; const Nd4jLong zCoord4 = coords[dimID + 2] * factorW; for(coords[dimID] = zCoord2; coords[dimID] < zCoord2 + factorD; ++coords[dimID]) for(coords[dimID + 1] = zCoord3; coords[dimID + 1] < zCoord3 + factorH; ++coords[dimID + 1]) for(coords[dimID + 2] = zCoord4; coords[dimID + 2] < zCoord4 + factorW; ++coords[dimID + 2]) z[zOffset] += x[shape::getOffset(xShapeInfo, coords)]; } ////////////////////////////////////////////////////////////////////////// template <typename T> static void upsampling3dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCDHW) { hipLaunchKernelGGL(( upsampling3dBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, isNCDHW); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::upsampling3dBP(nd4j::graph::Context& block, const NDArray& gradO, NDArray& gradI, const bool isNCDHW) { PointersManager manager(block.launchContext(), "upsampling3d_bp"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = gradI.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&gradI}, {&gradO}); BUILD_SINGLE_SELECTOR(gradI.dataType(), upsampling3dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), isNCDHW), FLOAT_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO}); manager.synchronize(); } } }
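//////////////////////////////////////////////////////////////////////////
// Illustrative sketch, not part of the library: a minimal host-side reference for the
// 2D pooling semantics implemented by the max/avg/pnorm kernels above, for a contiguous
// NCHW float input. It mirrors the kernels' effective-kernel/dilation handling and the
// meaning of extraParam0 (avg: 0 = exclude padding from the divisor, 1 = divide by the
// full kH*kW window; pnorm: extraParam0 is the exponent p). The function name
// refPooling2dNCHW and the 0/1/2 mode encoding (matching the pooling3d switch) are
// assumptions made for this sketch only.
#include <vector>
#include <cmath>
#include <algorithm>
#include <limits>

static void refPooling2dNCHW(const std::vector<float>& x, std::vector<float>& z,
                             int bS, int iC, int iH, int iW, int oH, int oW,
                             int kH, int kW, int sH, int sW, int pH, int pW,
                             int dH, int dW, int mode /*0 max, 1 avg, 2 pnorm*/, int extraParam0) {

    const int kHEff = kH + (kH - 1) * (dH - 1);      // effective kernel extent with dilation
    const int kWEff = kW + (kW - 1) * (dW - 1);

    for (int n = 0; n < bS; ++n)
    for (int c = 0; c < iC; ++c)
    for (int ph = 0; ph < oH; ++ph)
    for (int pw = 0; pw < oW; ++pw) {

        int hstart = sH * ph - pH, wstart = sW * pw - pW;
        int hend   = hstart + kHEff, wend = wstart + kWEff;

        // clamp the window to the input, stepping by whole dilation units (same as the kernels)
        if (hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH);
        if (wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW);
        if (hend > iH)  hend   -= dH * ((hend - iH + dH - 1) / dH);
        if (wend > iW)  wend   -= dW * ((wend - iW + dW - 1) / dW);

        float acc = (mode == 0) ? -std::numeric_limits<float>::max() : 0.f;
        int count = 0;

        for (int h = hstart; h < hend; h += dH)
        for (int w = wstart; w < wend; w += dW) {
            const float v = x[((n * iC + c) * iH + h) * iW + w];
            if (mode == 0)      acc = std::max(acc, v);                                // max
            else if (mode == 1) acc += v;                                              // avg: plain sum
            else                acc += std::pow(std::fabs(v), (float) extraParam0);    // pnorm: sum |v|^p
            ++count;
        }

        if (mode == 1) acc /= (extraParam0 == 1 ? kH * kW : count);   // include vs exclude padding
        if (mode == 2) acc  = std::pow(acc, 1.f / (float) extraParam0);

        z[((n * iC + c) * oH + ph) * oW + pw] = acc;
    }
}
// A quick sanity check could fill x with random values and compare this against the GPU
// pooling2d path for the same parameters (after copying the device result back to host).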
4f2a68ee137702e1c0420f308ba92c79b6f2f269.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * Copyright (c) 2019 Konduit K.K. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/convolutions.h> #include <ops/declarable/helpers/im2col.h> #include <ops/declarable/helpers/col2im.h> #include<ops/declarable/helpers/addBias.h> #include <exceptions/cuda_exception.h> #include <NDArrayFactory.h> #include <MmulHelper.h> #include <PointersManager.h> #include <templatemath.h> namespace nd4j { namespace ops { ////////////////////////////////////////////////////////////////////////// // vol [bS, iC, iD, iH, iW] is convoluted to col [bS, iC, kD, kH, kW, oD, oH, oW] template <typename T> static __global__ void vol2colCuda(const void* volume, const Nd4jLong* volShapeInfo, void* columns, const Nd4jLong* colShapeInfo, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) { const T* vol = reinterpret_cast<const T*>(volume); T* col = reinterpret_cast<T*>(columns); __shared__ int colRank, volRank; __shared__ Nd4jLong colLen, iD, iH, iW, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); volRank = 5; colRank = 8; colLen = shape::length(colShapeInfo); iD = volShapeInfo[3]; iH = volShapeInfo[4]; iW = volShapeInfo[5]; } __syncthreads(); const auto colInd = threadIdx.x + blockIdx.x * blockDim.x; if(colInd >= colLen) return; auto coords = sharedMem + threadIdx.x * colRank; shape::index2coords(colInd, colShapeInfo, coords); // const auto colW = coords[7]; // const auto colH = coords[6]; // const auto colD = coords[5]; // const auto kCol = coords[4]; // const auto kRow = coords[3]; // const auto kDep = coords[2]; // const auto c = coords[1]; // const auto b = coords[0]; const auto colOffset = shape::getOffset(colShapeInfo, coords); coords[2] = -pD + coords[2] * dD + coords[5] * sD; // const auto volDep = (-pD + kDep * dD) + colD * sD; coords[3] = -pH + coords[3] * dH + coords[6] * sH; // const auto volRow = (-pH + kRow * dH) + colH * sH; coords[4] = -pW + coords[4] * dW + coords[7] * sW; // const auto volCol = (-pW + kCol * dW) + colW * sW; if (static_cast<unsigned>(coords[2]) >= static_cast<unsigned>(iD) || static_cast<unsigned>(coords[3]) >= static_cast<unsigned>(iH) || static_cast<unsigned>(coords[4]) >= static_cast<unsigned>(iW)) col[colOffset] = static_cast<T>(0.); else col[colOffset] = vol[shape::getOffset(volShapeInfo, coords)]; } ////////////////////////////////////////////////////////////////////////// template <typename T> static void vol2colCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* volume, const Nd4jLong* volShapeInfo, void* columns, const Nd4jLong* colShapeInfo, const int sD, const int sH, const int sW, 
const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) { vol2colCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(volume, volShapeInfo, columns, colShapeInfo, sD, sH, sW, pD, pH, pW, dD, dH, dW); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::vol2col(nd4j::graph::Context& block, const NDArray& vol, NDArray& col, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) { PointersManager manager(block.launchContext(), "vol2col"); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (col.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = col.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&col}, {&vol}); BUILD_SINGLE_SELECTOR(vol.dataType(), vol2colCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), vol.getSpecialBuffer(), vol.getSpecialShapeInfo(), col.specialBuffer(), col.specialShapeInfo(), sD, sH, sW, pD, pH, pW, dD, dH, dW), FLOAT_TYPES); NDArray::registerSpecialUse({&col}, {&vol}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// // columns [bS, iC, kD, kH, kW, oD, oH, oW] to be de-convoluted to volume [bS, iC, iD, iH, iW] template <typename T> static __global__ void col2volCuda(const void* columns, const Nd4jLong* colShapeInfo, void* volume, const Nd4jLong* volShapeInfo, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) { const T* col = reinterpret_cast<const T*>(columns); T* vol = reinterpret_cast<T*>(volume); __shared__ uint kD, kH, kW, oD, oH, oW, *sharedMem; __shared__ Nd4jLong volLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<uint*>(shmem); oD = colShapeInfo[6]; oH = colShapeInfo[7]; oW = colShapeInfo[8]; kD = dD * (colShapeInfo[3] - 1) + 1; kH = dH * (colShapeInfo[4] - 1) + 1; kW = dW * (colShapeInfo[5] - 1) + 1; volLen = shape::length(volShapeInfo); } __syncthreads(); auto coords = sharedMem + threadIdx.x * 8; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < volLen; i += gridDim.x * blockDim.x) { shape::index2coords(i, volShapeInfo, coords); const auto volOffset = shape::getOffset(volShapeInfo, coords); const auto bSiCoffset = coords[0] * colShapeInfo[9] + coords[1] * colShapeInfo[10]; const uint imD = coords[2] + pD; const uint imH = coords[3] + pH; const uint imW = coords[4] + pW; const uint colDstart = (imD < kD) ? 0 : (imD - kD) / sD + 1; const uint colHstart = (imH < kH) ? 0 : (imH - kH) / sH + 1; const uint colWstart = (imW < kW) ? 
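// colDstart/colHstart/colWstart (and the corresponding *end bounds just below) limit the loops to output
// positions whose receptive field [col*stride, col*stride + effectiveKernel) covers this input coordinate;
// the "% dD/dH/dW != 0" checks inside the loops then skip offsets that do not land on a dilated kernel tap.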
0 : (imW - kW) / sW + 1; const uint colDend = nd4j::math::nd4j_min<uint>(imD / sD + 1, oD); const uint colHend = nd4j::math::nd4j_min<uint>(imH / sH + 1, oH); const uint colWend = nd4j::math::nd4j_min<uint>(imW / sW + 1, oW); T val = 0; for(uint colD = colDstart; colD < colDend; ++colD) { coords[2] = imD - colD * sD; if(coords[2] % dD != 0) continue; for(uint colH = colHstart; colH < colHend; ++colH) { coords[3] = imH - colH * sH; if(coords[3] % dH != 0) continue; for(uint colW = colWstart; colW < colWend; ++colW) { coords[4] = imW - colW * sW; if(coords[4] % dW != 0) continue; val += col[bSiCoffset + (coords[2]/dD)*colShapeInfo[11] + (coords[3]/dH)*colShapeInfo[12] + (coords[4]/dW)*colShapeInfo[13] + colD*colShapeInfo[14] + colH*colShapeInfo[15] + colW*colShapeInfo[16]]; } } } vol[volOffset] = val; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void col2volCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* columns, const Nd4jLong* colShapeInfo, void* volume, const Nd4jLong* volShapeInfo, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) { col2volCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(columns, colShapeInfo, volume, volShapeInfo, sD, sH, sW, pD, pH, pW, dD, dH, dW); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::col2vol(nd4j::graph::Context& block, const NDArray& col, NDArray& vol, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) { PointersManager manager(block.launchContext(), "col2vol"); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (vol.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = col.rankOf() * sizeof(uint) * threadsPerBlock + 256; NDArray::prepareSpecialUse({&vol}, {&col}); BUILD_SINGLE_SELECTOR(vol.dataType(), col2volCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), col.getSpecialBuffer(), col.getSpecialShapeInfo(), vol.specialBuffer(), vol.specialShapeInfo(), sD, sH, sW, pD, pH, pW, dD, dH, dW), FLOAT_TYPES); NDArray::registerSpecialUse({&vol}, {&col}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> static void conv2d_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { // input [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) // weights [kH, kW, iC, oC] always // bias [oC] // output [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW) // kH filter(kernel) height // kW filter(kernel) width // sH strides height // sW strides width // pH paddings height // pW paddings width // dH dilations height // dW dilations width // paddingMode 0-VALID, 1-SAME // isNCHW 1-NCHW, 0-NHWC int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); 
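// conv2d forward here is im2col + GEMM: im2col turns the NCHW input into columns [bS, iC, kH, kW, oH, oW],
// which (viewed as [bS*oH*oW, kH*kW*iC]) are contracted with the weights [kH, kW, iC, oC] via tensorDot over
// {kH, kW, iC} to give [bS, oH, oW, oC]; the result is then reshaped/permuted to the requested output layout
// and the bias, if present, is broadcast-added.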
ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<int> permutForOutput; if(isNCHW) permutForOutput = {0, 3, 1, 2}; // [bS, oH, oW, oC] -> [bS, oC, oH, oW] else input = new NDArray(input->permute({0, 3, 1, 2})); // [bS, iH, iW, iC] -> [bS, iC, iH, iW] if NHWC NDArray col('c', {bS, oH, oW, kH, kW, iC}, input->dataType(), input->getContext()); NDArray colP = col.permute({0, 5, 3, 4, 1, 2}); // {bS, iC, kH, kW, oH, oW} NDArray mmulResult('f', {bS*oH*oW, oC}, output->dataType(), output->getContext()); //----- calculation of output -----// auto ctx = block.launchContext(); helpers::im2col(*ctx, *input, colP, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext())); // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW] MmulHelper::tensorDot(&col, weights, &mmulResult, {3,4,5}, {0,1,2}, {}); // [bS, oH, oW, kH, kW, iC] x [kH, kW, iC, oC] = [bS, oH, oW, oC] //----- assign outTemp to output -----// if(isNCHW) { mmulResult.reshapei({bS, oH, oW, oC}); mmulResult.permutei(permutForOutput); } output->assign(mmulResult); //----- add biases if required -----// if(bias) // output->applyBroadcast(broadcast::Add, {indIOioC}, bias); helpers::addBias(block, *output, *bias, *output, isNCHW); if(!isNCHW) delete input; } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::conv2d(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), conv2d_, (block, input, weights, bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> static void depthwiseConv2d_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { // input [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) // weights [kH, kW, iC, mC] always // bias [oC] = iC*mC // output [bS, oH, oW, iC*mC] (NHWC) or [bS, iC*mC, oH, oW] (NCHW) // kH filter(kernel) height // kW filter(kernel) width // sH strides height // sW strides width // pH paddings height // pW paddings width // dH dilations height // dW dilations width // paddingMode 0-VALID, 1-SAME // isNCHW 0-NCHW, 1-NHWC int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, channels multiplier(oC = iC*mC), output channels, output height/width int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH); mC = weights->sizeAt(indWmC); // channels multiplier std::vector<std::vector<Nd4jLong>> modifColumns = {{1,0,4,5,2,3}, {iC,bS*oH*oW,kH*kW}}; // [bS,iC,kH,kW,oH,oW] -> [iC,bS,oH,oW,kH,kW] -> [iC,bS*oH*oW,kH*kW] std::vector<std::vector<Nd4jLong>> modifOutput; std::vector<Nd4jLong> outReShape; if(!isNCHW) { outReShape = {bS, oH, oW, iC, mC}; // [bS,oH,oW,iC*mC] -> [bS,oH,oW,iC,mC] modifOutput = {{3,0,1,2,4},{iC, bS*oH*oW, mC}}; // [bS,oH,oW,iC,mC] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC] input = new 
NDArray(input->permute({0, 3, 1, 2})); // [bS,iH,iW,iC] -> [bS,iC,iH,iW] } else { outReShape = {bS, iC, mC, oH, oW}; // [bS,iC*mC,oH,oW] -> [bS,iC,mC,oH,oW] modifOutput = {{1,0,3,4,2},{iC, bS*oH*oW, mC}}; // [bS,iC,mC,oH,oW] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC] } if(paddingMode == 1) // SAME ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW); NDArray columns(input->ordering(), {bS, iC, kH, kW, oH, oW}, input->dataType(), input->getContext()); NDArray outputReshaped = output->reshape(output->ordering(), outReShape, false); helpers::im2col(*output->getContext(), *input, columns, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext())); // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW] MmulHelper::tensorDot(&columns, weights, &outputReshaped, modifColumns, {{2,0,1,3},{iC,kH*kW,mC}}, modifOutput); // [iC, bS*oH*oW, kW*kH] x [iC, kH*kW, mC] = [iC, bS*oH*oW, mC] if(bias) // output->applyBroadcast(broadcast::Add, {indIOioC}, bias); helpers::addBias(block, *output, *bias, *output, isNCHW); if(!isNCHW) delete input; } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::depthwiseConv2d(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), depthwiseConv2d_, (block, input, weights, bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> static void sconv2d_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weightsDepth, const NDArray* weightsPoint, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { // input [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) // weightsDepth [kH, kW, iC, mC] always // weightsPoint [1, 1, iC*mC, oC] always // bias [oC], oC = iC*mC if weightsPoint=nullptr // output is [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW) // kH filter(kernel) height // kW filter(kernel) width // sH strides height // sW strides width // pH paddings height // pW paddings width // dH dilations height // dW dilations width // paddingMode 0-VALID, 1-SAME // isNCHW 1-NCHW, 0-NHWC int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, channels multiplier, output channels, output height/width int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH); mC = weightsDepth->sizeAt(indWmC); // channels multiplier NDArray* outputDepth = output; if(weightsPoint) // if pointwise convolution is expected outputDepth = new NDArray(output->ordering(), !isNCHW ? std::vector<Nd4jLong>({bS, oH, oW, iC*mC}) : std::vector<Nd4jLong>({bS, iC*mC, oH, oW}), input->dataType(), input->getContext()); // ----- perform depthwise convolution (if weightsPoint is absent then oC = iC*mC) ----- // ConvolutionUtils::depthwiseConv2d(block, input, weightsDepth, weightsPoint ? 
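// bias routing: if a pointwise stage follows, the depthwise stage gets no bias (nullptr) and the bias is
// applied once in the 1x1 pointwise convolution below; otherwise the depthwise output is the final result
// and receives the bias directly.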
nullptr : bias, outputDepth, kH,kW, sH,sW, pH,pW, dH,dW, paddingMode, isNCHW); // ----- perform pointwise convolution (oH = iH, oW = iW) ----- // if (weightsPoint) { ConvolutionUtils::conv2d(block, outputDepth, weightsPoint, bias, output, 1,1, 1,1, 0,0, 1,1, paddingMode, isNCHW); // in this case oH=iH, oW=iW delete outputDepth; } } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::sconv2d(nd4j::graph::Context& block, const NDArray* input, const NDArray* weightsDepth, const NDArray* weightsPoint, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), sconv2d_, (block, input, weightsDepth, weightsPoint, bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static __global__ void avgPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if(hstart < 0){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) -hstart / (Z)dH); hstart += f * dH; } if(wstart < 0){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) -wstart / (Z) dW); wstart += f * dW; } if(hend > iH){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) (hend-iH) / (Z) dH); hend -= f * dH; } if(wend > iW){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) (wend-iW) / (Z) dW); wend -= f * dW; } //Accounts for dilation int pool_size = nd4j::math::nd4j_ceil<double,int>((double) (hend-hstart) / (double) dH) * nd4j::math::nd4j_ceil<double,int>((double) (wend-wstart) / (double) dW); Z sum = 0.0f; const X *inSlice = x + (n * strideB + c * strideC); for (int h = hstart; h < hend; h += dH) for (int w = wstart; w < wend; w += dW) sum += static_cast<Z>(inSlice[h * strideY + w * strideX]); int divide_factor = pool_size; //Case 0: exclude padding if (extraParam0 == 1) //Case 1: include padding 
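// e.g. for a 3x3 kernel whose window hangs one row past the top edge, exclude-padding (extraParam0 == 0)
// divides the sum by the 6 in-bounds values, while include-padding (extraParam0 == 1) divides by the full 9.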
divide_factor = kH * kW; z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = sum / static_cast<Z>(divide_factor); } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static void avgPooling2dCudaLauncher(nd4j::LaunchContext & block, void *vx, Nd4jLong *vxShapeInfo, void *vz, Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { avgPooling2dCuda<X, Z><<<512, 512, 4192, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static __global__ void pnormPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; __shared__ bool fOrder; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if (hstart < 0) { int f = nd4j::math::nd4j_ceil<Z, int>((Z) -hstart / (Z) dH); hstart += f * dH; } if (wstart < 0) { int f = nd4j::math::nd4j_ceil<Z, int>((Z) -wstart / (Z) dW); wstart += f * dW; } if (hend > iH) { int f = nd4j::math::nd4j_ceil<Z, int>((Z) (hend - iH) / (Z) dH); hend -= f * dH; } if (wend > iW) { int f = nd4j::math::nd4j_ceil<Z, int>((Z) (wend - iW) / (Z) dW); wend -= f * dW; } //Accounts for dilation int pool_size = nd4j::math::nd4j_ceil<double, int>((double) (hend - hstart) / (double) dH) * nd4j::math::nd4j_ceil<double, int>((double) (wend - wstart) / (double) dW); Z sum = 0.f; const X *inSlice = x + (n * strideB + c * strideC); for (int h = hstart; h < hend; h += dH) for (int w = wstart; w < wend; w += dW) sum += nd4j::math::nd4j_pow<Z, Z, Z>(static_cast<Z>(nd4j::math::nd4j_abs<X>(inSlice[h * strideY + w * strideX])), extraParam0); z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = nd4j::math::nd4j_pow<Z, Z, Z>(sum, (Z) 1.0f / extraParam0); } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static void 
pnormPooling2dCudaLauncher(nd4j::LaunchContext & block, void *vx, Nd4jLong *vxShapeInfo, void *vz, Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { pnormPooling2dCuda<X, Z><<<512, 512, 4192, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static __global__ void maxPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; __shared__ bool fOrder; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if(hstart < 0){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) -hstart / (Z)dH); hstart += f * dH; } if(wstart < 0){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) -wstart / (Z) dW); wstart += f * dW; } if(hend > iH){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) (hend-iH) / (Z) dH); hend -= f * dH; } if(wend > iW){ int f = nd4j::math::nd4j_ceil<Z,int>((Z) (wend-iW) / (Z) dW); wend -= f * dW; } //Accounts for dilation int pool_size = nd4j::math::nd4j_ceil<double,int>((double) (hend-hstart) / (double) dH) * nd4j::math::nd4j_ceil<double,int>((double) (wend-wstart) / (double) dW); Z max = -nd4j::DataTypeUtils::max<Z>(); const X *inSlice = x + (n * strideB + c * strideC); for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { Z v = static_cast<Z>(inSlice[h * strideY + w * strideX]); if (v > max) max = v; } } z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = max; } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static void maxPooling2dCudaLauncher(nd4j::LaunchContext & block, void *vx, Nd4jLong *vxShapeInfo, void *vz, Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { maxPooling2dCuda<X,Z><<<512, 512, 4192, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, 
vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::pooling2d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const PoolingType poolingMode, const int extraParam0) { if(!input.isActualOnDeviceSide()) input.syncToDevice(); switch (poolingMode) { case MAX_POOL: { BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), maxPooling2dCudaLauncher, (*block.launchContext(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), FLOAT_TYPES); } break; case AVG_POOL: { BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), avgPooling2dCudaLauncher, (*block.launchContext(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), FLOAT_TYPES); } break; case PNORM_POOL: { BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), pnormPooling2dCudaLauncher, (*block.launchContext(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), FLOAT_TYPES); } break; default: throw std::runtime_error("Pooling2D: Unknown PoolingType used"); } output.tickWriteDevice(); input.tickReadDevice(); auto result = cudaStreamSynchronize(*block.launchContext()->getCudaStream()); if (result != 0) throw cuda_exception::build("Pooling2D failed", result); } ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void pooling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { // x input is [bS, iC, iD, iH, iW] // z output is [bS, iC, oD, oH, oW] const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd; __shared__ Nd4jLong *sharedMem, zLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); zLen = shape::length(zShapeInfo); rank = 5; kDeff = kD + (kD - 1) * (dD - 1); kHeff = kH + (kH - 1) * (dH - 1); kWeff = kW + (kW - 1) * (dW - 1); iD = xShapeInfo[3]; iH = xShapeInfo[4]; iW = xShapeInfo[5]; kProd = kD * kH * kW; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); int dstart = coords[2] * sD - pD; int hstart = coords[3] * sH - pH; int wstart = coords[4] * sW - pW; int dend = dstart + kDeff; int hend = hstart + kHeff; int wend = wstart + kWeff; if(dstart < 0) dstart += dD * ((-dstart + dD - 1) / dD); if(hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH); if(wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW); if(dend > iD) dend -= dD * ((dend - iD + dD - 1) / dD); if(hend > iH) hend -= dH * ((hend - iH + dH - 1) / dH); if(wend > iW) wend -= dW * ((wend - iW + dW - 1) / dW); switch (poolingMode) { /*** max ***/ case 0: { T max = -DataTypeUtils::max<T>(); for (coords[2] = dstart; 
coords[2] < dend; coords[2] += dD) { for (coords[3] = hstart; coords[3] < hend; coords[3] += dH){ for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) { T val = x[shape::getOffset(xShapeInfo, coords)]; if (val > max) max = val; } } } z[zOffset] = max; } break; /*** avg ***/ case 1: { T sum = static_cast<T>(0.); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += x[shape::getOffset(xShapeInfo, coords)]; if (extraParam0 == 0) { //Exclude padding uint a = (dend - dstart) / dD + ((dend - dstart) % dD == 0 ? 0 : 1); uint b = (hend - hstart) / dH + ((hend - hstart) % dH == 0 ? 0 : 1); uint c = (wend - wstart) / dW + ((wend - wstart) % dW == 0 ? 0 : 1); sum /= static_cast<T>(a * b * c); // /= nd4j::math::nd4j_ceil<double,T>(static_cast<double>(dend - dstart) / static_cast<double>(dD)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation } else if (extraParam0 == 1) //Include padding sum /= kProd; z[zOffset] = sum; } break; /*** pnorm ***/ case 2: { T sum = static_cast<T>(0.); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0); sum = nd4j::math::nd4j_pow<T,T,T>(sum, (T) 1.f / extraParam0); z[zOffset] = sum; } break; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void pooling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { pooling3dCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::pooling3d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { PointersManager manager(block.launchContext(), "pooling3d"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), pooling3dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } 
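//////////////////////////////////////////////////////////////////////////
// Illustrative sketch (added note, not part of the original ConvolutionUtils API):
// the pooling kernels above operate on the *effective* kernel extent
// kEff = k + (k - 1)*(d - 1), which folds dilation into the window size.
// The hypothetical helper below restates how an output spatial extent would
// follow from that quantity for the VALID (explicit padding) and SAME
// conventions used in this file; the name and signature are assumptions
// introduced only for clarity.
static inline int poolOutSizeSketch(const int inSize, const int k, const int s, const int p, const int d, const bool sameMode) {
    const int kEff = k + (k - 1) * (d - 1);        // effective kernel extent with dilation
    if (sameMode)
        return (inSize + s - 1) / s;               // SAME: ceil(inSize / stride)
    return (inSize + 2 * p - kEff) / s + 1;        // VALID / explicit padding
}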
////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void pooling2dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) { // x: input [bS, iC, iH, iW] // y: gradO [bS, iC, oH, oW] // z: gradI [bS, iC, iH, iW] -> gradI is output in this function const T* x = reinterpret_cast<const T*>(vx); const T* y = reinterpret_cast<const T*>(vy); T* z = reinterpret_cast<T*>(vz); Nd4jLong coord2, coord3; __shared__ int rank, kHeff, kWeff, iH, iW, kProd; __shared__ Nd4jLong *sharedMem, yLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); yLen = shape::length(yShapeInfo); rank = 4; kHeff = kH + (kH - 1) * (dH - 1); kWeff = kW + (kW - 1) * (dW - 1); iH = xShapeInfo[3]; iW = xShapeInfo[4]; kProd = kH * kW; } __syncthreads(); const auto yInd = threadIdx.x + blockIdx.x * blockDim.x; if(yInd >= yLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(yInd, yShapeInfo, coords); const auto yOffset = shape::getOffset(yShapeInfo, coords); int hstart = coords[2] * sH - pH; int wstart = coords[3] * sW - pW; int hend = hstart + kHeff; int wend = wstart + kWeff; if(hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH); if(wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW); if(hend > iH) hend -= dH * ((hend - iH + dH - 1) / dH); if(wend > iW) wend -= dW * ((wend - iW + dW - 1) / dW); switch (poolingMode) { /*** max ***/ case 0: { coord2 = hstart; coord3 = wstart; T max = -DataTypeUtils::max<T>(); for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) { for (coords[3] = wstart; coords[3] < wend; coords[3] += dW){ T val = x[shape::getOffset(xShapeInfo, coords)]; if (val > max) { max = val; coord2 = coords[2]; coord3 = coords[3]; } } } coords[2] = coord2; coords[3] = coord3; auto zOffset = shape::getOffset(zShapeInfo, coords); nd4j::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], y[yOffset]); //z[zOffset] += y[yOffset]; } break; /*** avg ***/ case 1: { T val = y[yOffset]; if (extraParam0 == 0) //Exclude padding val /= nd4j::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation else if (extraParam0 == 1) //Include padding val /= kProd; for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) nd4j::math::atomics::nd4j_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], val); } break; /*** pnorm ***/ case 2: { T sum = static_cast<T>(0.); T val = y[yOffset]; for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) sum += nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0); val *= nd4j::math::nd4j_pow<T,T,T>(sum, ((T)1.f - extraParam0) / extraParam0); for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) { for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) { const auto xOffset = shape::getOffset(xShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); nd4j::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], val * nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[xOffset]), 
extraParam0 - 1.f) * nd4j::math::nd4j_sgn<T,T>(x[xOffset])); } } } break; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void pooling2dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) { pooling2dBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::pooling2dBP(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) { // initial zeroing of gradI gradI.nullify(); PointersManager manager(block.launchContext(), "pooling2dBP"); const int threadsPerBlock = 256; const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = gradO.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&gradI}, {&input, &gradO}); BUILD_SINGLE_SELECTOR(input.dataType(), pooling2dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0), FLOAT_TYPES); NDArray::registerSpecialUse({&gradI}, {&input, &gradO}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void pooling3dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { // x: input [bS, iC, iD, iH, iW] // y: gradO [bS, iC, oD, oH, oW] // z: gradI [bS, iC, iD, iH, iW] -> gradI is output in this function const T* x = reinterpret_cast<const T*>(vx); const T* y = reinterpret_cast<const T*>(vy); T* z = reinterpret_cast<T*>(vz); Nd4jLong coord2, coord3, coord4; __shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd; __shared__ Nd4jLong *sharedMem, yLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); yLen = shape::length(yShapeInfo); rank = 5; kDeff = kD + (kD - 1) * (dD - 1); kHeff = kH + (kH - 1) * (dH - 1); kWeff = kW + (kW - 1) * (dW - 1); iD = xShapeInfo[3]; iH = xShapeInfo[4]; iW = xShapeInfo[5]; kProd = kD * kH * kW; } __syncthreads(); const auto yInd = threadIdx.x + blockIdx.x * blockDim.x; if(yInd >= yLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(yInd, yShapeInfo, coords); const auto yOffset = shape::getOffset(yShapeInfo, coords); int dstart = coords[2] * sD - pD; int hstart = coords[3] * sH - pH; int wstart = coords[4] * sW - pW; int dend = dstart + 
kDeff; int hend = hstart + kHeff; int wend = wstart + kWeff; if(dstart < 0) dstart += dD * ((-dstart + dD - 1) / dD); if(hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH); if(wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW); if(dend > iD) dend -= dD * ((dend - iD + dD - 1) / dD); if(hend > iH) hend -= dH * ((hend - iH + dH - 1) / dH); if(wend > iW) wend -= dW * ((wend - iW + dW - 1) / dW); switch (poolingMode) { /*** max ***/ case 0: { T max = -DataTypeUtils::max<T>(); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) { for (coords[3] = hstart; coords[3] < hend; coords[3] += dH){ for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) { T val = x[shape::getOffset(xShapeInfo, coords)]; if (val > max) { max = val; coord2 = coords[2]; coord3 = coords[3]; coord4 = coords[4]; } } } } coords[2] = coord2; coords[3] = coord3; coords[4] = coord4; nd4j::math::atomics::nd4j_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], y[yOffset]); } break; /*** avg ***/ case 1: { T val = y[yOffset]; if (extraParam0 == 0) //Exclude padding val /= nd4j::math::nd4j_ceil<double,T>(static_cast<double>(dend - dstart) / static_cast<double>(dD)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation else if (extraParam0 == 1) //Include padding val /= kProd; for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) nd4j::math::atomics::nd4j_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], val); } break; /*** pnorm ***/ case 2: { T sum = static_cast<T>(0.); T val = y[yOffset]; for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0); val *= nd4j::math::nd4j_pow<T,T,T>(sum, ((T)1.f - extraParam0) / extraParam0); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) { for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) { for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) { const auto xOffset = shape::getOffset(xShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); nd4j::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], val * nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[xOffset]), extraParam0 - 1.f) * nd4j::math::nd4j_sgn<T,T>(x[xOffset])); } } } } break; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void pooling3dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { pooling3dBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0); } ////////////////////////////////////////////////////////////////////////// void 
ConvolutionUtils::pooling3dBP(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { // initial zeroing of gradI gradI.nullify(); PointersManager manager(block.launchContext(), "pooling3dBP"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = gradO.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&gradI}, {&input, &gradO}); BUILD_SINGLE_SELECTOR(input.dataType(), pooling3dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0), FLOAT_TYPES); NDArray::registerSpecialUse({&gradI}, {&input, &gradO}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> static void conv2dBP_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { // input [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) // weights [kH, kW, iC, oC] always // bias [oC] // gradO [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next // gradI [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW), epsilon // gradW [kH, kW, iC, oC] always // gradB [oC] // kH filter(kernel) height // kW filter(kernel) width // sH strides height // sW strides width // pH paddings height // pW paddings width // dH dilations height // dW dilations width // paddingMode 0-VALID, 1-SAME // isNCHW 0-NHWC, 1-NCHW int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<int> gradOaxesForDot; if(!isNCHW) { gradOaxesForDot = {0, 1, 2}; // bS, oH, oW input = new NDArray(input->permute({0, 3, 1, 2})); // [bS, iH, iW, iC] -> [bS, iC, iH, iW] gradI = new NDArray(gradI->permute({0, 3, 1, 2})); // [bS, iH, iW, iC] -> [bS, iC, iH, iW] } else { gradOaxesForDot = {0, 2, 3}; // bS, oH, oW } NDArray columns(input->ordering(), {bS, iC, kH, kW, oH, oW}, input->dataType(), input->getContext()); // ----- calculation of gradW ----- // if(gradW) { auto ctx = block.launchContext(); helpers::im2col(*ctx, *input, columns, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext())); // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW] nd4j::MmulHelper::tensorDot(&columns, gradO, gradW, {0,4,5}, gradOaxesForDot, {2, 0, 1, 3}); // [bS, iC, kH, kW, oH, oW] x [bS, oH, oW, oC]/[bS, oC, oH, oW] = [iC, kH, kW, oC] } // ----- calculation of gradB ----- // if(gradB) { NDArray* gradBR 
= gradB; if(gradB->rankOf() == 2) gradBR = new NDArray(gradB->reshape(gradB->ordering(), {(int)gradB->lengthOf()})); gradO->reduceAlongDimension(reduce::Sum, *gradBR, gradOaxesForDot, false); // sum over bS, oH, oW if(gradBR != gradB) delete gradBR; } //----- calculation of gradI -----// nd4j::MmulHelper::tensorDot(weights, gradO, &columns, {indWoC}, {indIOioC}, {2, 3, 1, 0, 4, 5}); // [kH, kW, iC, oC]/[oC, iC, kH, kW]] x [bS, oH, oW, oC]/[bS, oC, oH, oW] = [kH, kW, iC, bS, oH, oW] helpers::col2im(*block.launchContext(), columns, *gradI, sH, sW, pH, pW, iH, iW, dH, dW); // [bS, iC, kH, kW, oH, oW] is de-convoluted to [bS, iC, iH, iW] if(!isNCHW) { delete input; delete gradI; } } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::conv2dBP(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), conv2dBP_, (block, input, weights, bias, gradO, gradI, gradW, gradB, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> static void depthwiseConv2dBP_(const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { // input [bS, iH, iW, iC] (NDHWC) or [bS, iC, iH, iW] (NCDHW) // weights [kH, kW, iC, mC] always // bias [oC] = [iC*mC] // gradO [bS, oH, oW, oC] (NDHWC) or [bS, oC, oH, oW] (NCDHW), epsilon_next // gradI [bS, iH, iW, iC] (NDHWC) or [bS, iC, iH, iW] (NCDHW), epsilon // gradW [kH, kW, iC, mC] always // gradB [oC] // kH filter(kernel) height // kW filter(kernel) width // sH strides height // sW strides width // pH paddings height // pW paddings width // dH dilations height // dW dilations width // paddingMode 0-VALID, 1-SAME // isNCHW 0-NHWC, 1-NCHW int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, channels multiplier(oC = iC*mC), output channels, output height/width int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH); mC = weights->sizeAt(indWmC); // channels multiplier std::vector<std::vector<Nd4jLong>> modifColumns = {{1,2,3,0,4,5}, {iC, kH*kW, bS*oH*oW}}; // [bS,iC,kH,kW,oH,oW] -> [iC, kH*kW, bS*oH*oW] std::vector<std::vector<Nd4jLong>> modifGradO1, modifGradO2; std::vector<Nd4jLong> gradOreShape; if(!isNCHW) { gradOreShape = {bS, oH, oW, iC, mC}; // [bS,oH,oW,iC*mC] -> [bS,oH,oW,iC,mC] modifGradO1 = {{3,0,1,2,4},{iC, bS*oH*oW, mC}}; // [bS,oH,oW,iC,mC] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC] modifGradO2 = {{3,0,1,2},{iC, mC, bS*oH*oW}}; // [bS,oH,oW,iC*mC] -> [iC*mC,bS,oH,oW] -> [iC,mC,bS*oH*oW] input = new NDArray(input->permute({0, 3, 1, 2})); // [bS,iH,iW,iC] -> [bS,iC,iH,iW] gradI = new NDArray(gradI->permute({0, 3, 1, 2})); // [bS,iH,iW,iC] -> [bS,iC,iH,iW] } else { gradOreShape = {bS, iC, mC, oH, oW}; // [bS,iC*mC,oH,oW] -> [bS,iC,mC,oH,oW] modifGradO1 = {{1,0,3,4,2},{iC, bS*oH*oW, mC}}; // 
[bS,iC,mC,oH,oW] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC] modifGradO2 = {{1,0,2,3},{iC, mC, bS*oH*oW}}; // [bS,iC*mC,oH,oW] -> [iC*mC,bS,oH,oW] -> [iC,mC,bS*oH*oW] } if(paddingMode == 1) // SAME ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW); NDArray columns(input->ordering(), {bS, iC, kH, kW, oH, oW}, input->dataType(), input->getContext()); NDArray gradOreshaped = gradO->reshape(gradO->ordering(), gradOreShape); // ----- calculation of gradW and gradB ----- // helpers::im2col(*input->getContext(), *input, columns, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext())); // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW] nd4j::MmulHelper::tensorDot(&columns, &gradOreshaped, gradW, modifColumns, modifGradO1, {{2,0,1,3},{iC,kH*kW,mC}}); // [iC, kW*kH, bS*oH*oW] x [iC, bS*oH*oW, mC] = [iC, kH*kW, mC] // ----- calculation of gradB ----- // if(gradB) { NDArray* gradBR = gradB; if(gradB->rankOf() == 2) gradBR = new NDArray(gradB->reshape(gradB->ordering(), {(int)gradB->lengthOf()})); gradO->reduceAlongDimension(reduce::Sum, *gradBR, {0,indOoH,indOoH+1}, false); // sum over bS, oH, oW if(gradBR != gradB) delete gradBR; } //----- calculation of gradI -----// nd4j::MmulHelper::tensorDot(weights, gradO, &columns, {{2,0,1,3},{iC,kH*kW,mC}}, modifGradO2, modifColumns); // [iC, kH*kW, mC] x [iC, mC, bS*oH*oW] = [iC, kW*kH, bS*oH*oW] helpers::col2im(*input->getContext(), columns, *gradI, sH, sW, pH, pW, iH, iW, dH, dW); // [bS, iC, kH, kW, oH, oW] is de-convoluted to [bS, iC, iH, iW] if(!isNCHW) { delete input; delete gradI; } } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::depthwiseConv2dBP(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) { BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), depthwiseConv2dBP_, (input, weights, bias, gradO, gradI, gradW, gradB, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES); } ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void upsampling2dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorH, const int factorW, const bool isNCHW) { // x has shape [bS, iC, iH, iW] (NCHW) or [bS, iH, iW, iC] (NHWC) // z has shape [bS, iC, factorH*iH, factorW*iW ] (NCHW) or [bS, factorH*iH, factorW*iW, iC] (NHWC) const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, dimIH; __shared__ Nd4jLong *sharedMem, zLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); dimIH = isNCHW ? 
2 : 1; zLen = shape::length(zShapeInfo); rank = 4; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); coords[dimIH] /= factorH; coords[dimIH + 1] /= factorW; const auto xOffset = shape::getOffset(xShapeInfo, coords); z[zOffset] = x[xOffset]; } ////////////////////////////////////////////////////////////////////////// template <typename T> static void upsampling2dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorH, const int factorW, const bool isNCHW) { upsampling2dCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, factorH, factorW, isNCHW); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::upsampling2d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int factorH, const int factorW, const bool isNCHW) { PointersManager manager(block.launchContext(), "upsampling2d"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), upsampling2dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), factorH, factorW, isNCHW), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void upsampling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorD, const int factorH, const int factorW, const bool isNCDHW) { // x has shape [bS, iC, iD, iH, iW] (NCDHW) or [bS, iD, iH, iW, iC] (NDHWC) // z has shape [bS, iC, factorD*iD, factorH*iH, factorW*iW ] (NCDHW) or [bS, factorD*iD, factorH*iH, factorW*iW, iC] (NDHWC) const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, dimID; __shared__ Nd4jLong *sharedMem, zLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); dimID = isNCDHW ? 
2 : 1; zLen = shape::length(zShapeInfo); rank = 5; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); coords[dimID] /= factorD; coords[dimID + 1] /= factorH; coords[dimID + 2] /= factorW; const auto xOffset = shape::getOffset(xShapeInfo, coords); z[zOffset] = x[xOffset]; } ////////////////////////////////////////////////////////////////////////// template <typename T> static void upsampling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorD, const int factorH, const int factorW, const bool isNCDHW) { upsampling3dCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, factorD, factorH, factorW, isNCDHW); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::upsampling3d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int factorD, const int factorH, const int factorW, const bool isNCDHW) { PointersManager manager(block.launchContext(), "upsampling3d"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), upsampling3dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), factorD, factorH, factorW, isNCDHW), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void upsampling2dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCHW) { // x (gradO) has shape [bS, iC, factorH*iH, factorW*iW ] (NCHW) or [bS, factorH*iH, factorW*iW, iC] (NHWC) // z (gradI) has shape [bS, iC, iH, iW] (NCHW) or [bS, iH, iW, iC] (NHWC) const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, dimIH; __shared__ uint factorH, factorW; __shared__ Nd4jLong *sharedMem, zLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); dimIH = isNCHW ? 
2 : 1; zLen = shape::length(zShapeInfo); rank = 4; factorH = xShapeInfo[dimIH + 1] / zShapeInfo[dimIH + 1]; factorW = xShapeInfo[dimIH + 2] / zShapeInfo[dimIH + 2]; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); z[zOffset] = 0; const Nd4jLong zCoord2 = coords[dimIH] * factorH; const Nd4jLong zCoord3 = coords[dimIH + 1] * factorW; for(coords[dimIH] = zCoord2; coords[dimIH] < zCoord2 + factorH; ++coords[dimIH]) for(coords[dimIH + 1] = zCoord3; coords[dimIH + 1] < zCoord3 + factorW; ++coords[dimIH + 1]) z[zOffset] += x[shape::getOffset(xShapeInfo, coords)]; } ////////////////////////////////////////////////////////////////////////// template <typename T> static void upsampling2dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCHW) { upsampling2dBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, isNCHW); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::upsampling2dBP(nd4j::graph::Context& block, const NDArray& gradO, NDArray& gradI, const bool isNCHW) { PointersManager manager(block.launchContext(), "upsampling2d_bp"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = gradI.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&gradI}, {&gradO}); BUILD_SINGLE_SELECTOR(gradI.dataType(), upsampling2dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), isNCHW), FLOAT_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void upsampling3dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCDHW) { // x (gradO) has shape [bS, iC, iD, iH, iW] (NCDHW) or [bS, iD, iH, iW, iC] (NDHWC) // z (gradI) has shape [bS, iC, factorD*iD, factorH*iH, factorW*iW ] (NCDHW) or [bS, factorD*iD, factorH*iH, factorW*iW, iC] (NDHWC) const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, dimID; __shared__ uint factorD, factorH, factorW; __shared__ Nd4jLong *sharedMem, zLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); dimID = isNCDHW ? 
2 : 1; zLen = shape::length(zShapeInfo); rank = 5; factorD = xShapeInfo[dimID + 1] / zShapeInfo[dimID + 1]; factorH = xShapeInfo[dimID + 2] / zShapeInfo[dimID + 2]; factorW = xShapeInfo[dimID + 3] / zShapeInfo[dimID + 3]; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); z[zOffset] = 0; const Nd4jLong zCoord2 = coords[dimID] * factorD; const Nd4jLong zCoord3 = coords[dimID + 1] * factorH; const Nd4jLong zCoord4 = coords[dimID + 2] * factorW; for(coords[dimID] = zCoord2; coords[dimID] < zCoord2 + factorD; ++coords[dimID]) for(coords[dimID + 1] = zCoord3; coords[dimID + 1] < zCoord3 + factorH; ++coords[dimID + 1]) for(coords[dimID + 2] = zCoord4; coords[dimID + 2] < zCoord4 + factorW; ++coords[dimID + 2]) z[zOffset] += x[shape::getOffset(xShapeInfo, coords)]; } ////////////////////////////////////////////////////////////////////////// template <typename T> static void upsampling3dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCDHW) { upsampling3dBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, isNCDHW); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::upsampling3dBP(nd4j::graph::Context& block, const NDArray& gradO, NDArray& gradI, const bool isNCDHW) { PointersManager manager(block.launchContext(), "upsampling3d_bp"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = gradI.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&gradI}, {&gradO}); BUILD_SINGLE_SELECTOR(gradI.dataType(), upsampling3dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), isNCDHW), FLOAT_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO}); manager.synchronize(); } } }
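//////////////////////////////////////////////////////////////////////////
// Illustrative CPU reference (added sketch, not part of the library): the
// upsampling2dCuda kernel above performs nearest-neighbour upsampling by
// mapping every output coordinate back to an input coordinate via integer
// division by the corresponding factor. The standalone function below
// restates that mapping for an NCHW float buffer; the name, the float type
// and the flat row-major indexing are assumptions made for clarity only.
static void upsampling2dReferenceSketch(const float* x, float* z, const int bS, const int iC, const int iH, const int iW, const int factorH, const int factorW) {
    const int oH = iH * factorH;
    const int oW = iW * factorW;
    for (int b = 0; b < bS; ++b)
        for (int c = 0; c < iC; ++c)
            for (int oh = 0; oh < oH; ++oh)
                for (int ow = 0; ow < oW; ++ow) {
                    const int ih = oh / factorH;   // same coordinate mapping as in upsampling2dCuda
                    const int iw = ow / factorW;
                    z[((b * iC + c) * oH + oh) * oW + ow] = x[((b * iC + c) * iH + ih) * iW + iw];
                }
}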
1b94e2202819beeb49d8e6338863e161a46095a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Arman Pazouki, Wei Hu // ============================================================================= #include <thrust/extrema.h> #include <thrust/sort.h> #include "chrono_fsi/physics/ChFsiForceExplicitSPH.cuh" //================================================================================================================================ namespace chrono { namespace fsi { __device__ __inline__ void calc_G_Matrix(Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* G_i, uint* cellStart, uint* cellEnd, const size_t numAllMarkers) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML; // get address in grid int3 gridPos = calcGridPos(posRadA); // This is the elements of inverse of G Real mGi[9] = {0.0}; // examine neighbouring cells for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell50 uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); if (d > SuppRadii || sortedRhoPreMu[j].w <= -2) continue; Real3 grad_i_wij = GradWh(rij, h_i); Real3 grw_vj = grad_i_wij * paramsD.volume0; mGi[0] -= rij.x * grw_vj.x; mGi[1] -= rij.x * grw_vj.y; mGi[2] -= rij.x * grw_vj.z; mGi[3] -= rij.y * grw_vj.x; mGi[4] -= rij.y * grw_vj.y; mGi[5] -= rij.y * grw_vj.z; mGi[6] -= rij.z * grw_vj.x; mGi[7] -= rij.z * grw_vj.y; mGi[8] -= rij.z * grw_vj.z; } } } Real Det = (mGi[0] * mGi[4] * mGi[8] - mGi[0] * mGi[5] * mGi[7] - mGi[1] * mGi[3] * mGi[8] + mGi[1] * mGi[5] * mGi[6] + mGi[2] * mGi[3] * mGi[7] - mGi[2] * mGi[4] * mGi[6]); if (abs(Det) > 0.01) { Real OneOverDet = 1.0/Det; G_i[0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) * OneOverDet; G_i[1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) * OneOverDet; G_i[2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) * OneOverDet; G_i[3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) * OneOverDet; G_i[4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) * OneOverDet; G_i[5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) * OneOverDet; G_i[6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) * OneOverDet; G_i[7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) * OneOverDet; G_i[8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) * OneOverDet; } else { for (int i = 0; i < 9; i++) { G_i[i] = 0.0; } G_i[0] = 1; G_i[4] = 1; G_i[8] = 1; } } __device__ __inline__ void calc_A_Matrix(Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* A_i, Real* G_i, uint* cellStart, uint* cellEnd, const size_t numAllMarkers) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } Real3 posRadA = 
mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML; // get address in grid int3 gridPos = calcGridPos(posRadA); // examine neighbouring cells for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell50 uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); if (d > SuppRadii || sortedRhoPreMu[j].w <= -2) continue; Real h_j = sortedPosRad[j].w; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_ij = GradWh(rij, h_ij); Real V_j = paramsD.markerMass/paramsD.rho0; Real com_part = 0; com_part = (G_i[0] * grad_ij.x + G_i[1] * grad_ij.y + G_i[2] * grad_ij.z) * V_j; A_i[0] += rij.x * rij.x * com_part; // 111 A_i[1] += rij.x * rij.y * com_part; // 112 A_i[2] += rij.x * rij.z * com_part; // 113 A_i[3] += rij.y * rij.x * com_part; // 121 A_i[4] += rij.y * rij.y * com_part; // 122 A_i[5] += rij.y * rij.z * com_part; // 123 A_i[6] += rij.z * rij.x * com_part; // 131 A_i[7] += rij.z * rij.y * com_part; // 132 A_i[8] += rij.z * rij.z * com_part; // 133 com_part = (G_i[3] * grad_ij.x + G_i[4] * grad_ij.y + G_i[5] * grad_ij.z) * V_j; A_i[9] += rij.x * rij.x * com_part; // 211 A_i[10] += rij.x * rij.y * com_part; // 212 A_i[11] += rij.x * rij.z * com_part; // 213 A_i[12] += rij.y * rij.x * com_part; // 221 A_i[13] += rij.y * rij.y * com_part; // 222 A_i[14] += rij.y * rij.z * com_part; // 223 A_i[15] += rij.z * rij.x * com_part; // 231 A_i[16] += rij.z * rij.y * com_part; // 232 A_i[17] += rij.z * rij.z * com_part; // 233 com_part = (G_i[6] * grad_ij.x + G_i[7] * grad_ij.y + G_i[8] * grad_ij.z) * V_j; A_i[18] += rij.x * rij.x * com_part; // 311 A_i[19] += rij.x * rij.y * com_part; // 312 A_i[20] += rij.x * rij.z * com_part; // 313 A_i[21] += rij.y * rij.x * com_part; // 321 A_i[22] += rij.y * rij.y * com_part; // 322 A_i[23] += rij.y * rij.z * com_part; // 323 A_i[24] += rij.z * rij.x * com_part; // 331 A_i[25] += rij.z * rij.y * com_part; // 332 A_i[26] += rij.z * rij.z * com_part; // 333 } } } } __device__ __inline__ void calc_L_Matrix(Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* A_i, Real* L_i, Real* G_i, uint* cellStart, uint* cellEnd, const size_t numAllMarkers) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML; Real B[36] = {0.0}; Real L[6] = {0.0}; // get address in grid int3 gridPos = calcGridPos(posRadA); // examine neighbouring cells for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell50 uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); if (d > SuppRadii || sortedRhoPreMu[j].w <= -2) continue; Real3 eij = rij / d; Real h_j = sortedPosRad[j].w; // Real m_j = paramsD.markerMass; Real h_ij = 0.5 * 
(h_j + h_i); Real3 grad_ij = GradWh(rij, h_ij); Real V_j = paramsD.markerMass/paramsD.rho0; Real com_part = 0; // mn=11 Real XX = (eij.x * grad_ij.x); Real XY = (eij.x * grad_ij.y + eij.y * grad_ij.x); Real XZ = (eij.x * grad_ij.z + eij.z * grad_ij.x); Real YY = (eij.y * grad_ij.y); Real YZ = (eij.y * grad_ij.z + eij.z * grad_ij.y); Real ZZ = (eij.z * grad_ij.z); com_part = (A_i[0] * eij.x + A_i[9] * eij.y + A_i[18] * eij.z + rij.x * eij.x) * V_j; B[6 * 0 + 0] += com_part * XX; // 11 B[6 * 0 + 1] += com_part * XY; // 12 B[6 * 0 + 2] += com_part * XZ; // 13 B[6 * 0 + 3] += com_part * YY; // 14 B[6 * 0 + 4] += com_part * YZ; // 15 B[6 * 0 + 5] += com_part * ZZ; // 15 // mn=12 com_part = (A_i[1] * eij.x + A_i[10] * eij.y + A_i[19] * eij.z + rij.x * eij.y) * V_j; B[6 * 1 + 0] += com_part * XX; // 21 B[6 * 1 + 1] += com_part * XY; // 22 B[6 * 1 + 2] += com_part * XZ; // 23 B[6 * 1 + 3] += com_part * YY; // 24 B[6 * 1 + 4] += com_part * YZ; // 25 B[6 * 1 + 5] += com_part * ZZ; // 25 // mn=13 com_part = (A_i[2] * eij.x + A_i[11] * eij.y + A_i[20] * eij.z + rij.x * eij.z) * V_j; B[6 * 2 + 0] += com_part * XX; // 31 B[6 * 2 + 1] += com_part * XY; // 32 B[6 * 2 + 2] += com_part * XZ; // 33 B[6 * 2 + 3] += com_part * YY; // 34 B[6 * 2 + 4] += com_part * YZ; // 35 B[6 * 2 + 5] += com_part * ZZ; // 36 // Note that we skip mn=21 since it is similar to mn=12 // mn=22 com_part = (A_i[4] * eij.x + A_i[13] * eij.y + A_i[22] * eij.z + rij.y * eij.y) * V_j; B[6 * 3 + 0] += com_part * XX; // 41 B[6 * 3 + 1] += com_part * XY; // 42 B[6 * 3 + 2] += com_part * XZ; // 43 B[6 * 3 + 3] += com_part * YY; // 44 B[6 * 3 + 4] += com_part * YZ; // 45 B[6 * 3 + 5] += com_part * ZZ; // 46 // mn=23 com_part = (A_i[5] * eij.x + A_i[14] * eij.y + A_i[23] * eij.z + rij.y * eij.z) * V_j; B[6 * 4 + 0] += com_part * XX; // 51 B[6 * 4 + 1] += com_part * XY; // 52 B[6 * 4 + 2] += com_part * XZ; // 53 B[6 * 4 + 3] += com_part * YY; // 54 B[6 * 4 + 4] += com_part * YZ; // 55 B[6 * 4 + 5] += com_part * ZZ; // 56 // mn=33 com_part = (A_i[8] * eij.x + A_i[17] * eij.y + A_i[26] * eij.z + rij.z * eij.z) * V_j; B[6 * 5 + 0] += com_part * XX; // 61 B[6 * 5 + 1] += com_part * XY; // 62 B[6 * 5 + 2] += com_part * XZ; // 63 B[6 * 5 + 3] += com_part * YY; // 64 B[6 * 5 + 4] += com_part * YZ; // 65 B[6 * 5 + 5] += com_part * ZZ; // 66 } } } inv6xdelta_mn(B, L); L_i[0] = L[0]; L_i[1] = L[1]; L_i[2] = L[2]; L_i[3] = L[1]; L_i[4] = L[3]; L_i[5] = L[4]; L_i[6] = L[2]; L_i[7] = L[4]; L_i[8] = L[5]; // Real Det = (L_i[0] * L_i[4] * L_i[8] - L_i[0] * L_i[5] * L_i[7] - L_i[1] * L_i[3] * L_i[8] + // L_i[1] * L_i[5] * L_i[6] + L_i[2] * L_i[3] * L_i[7] - L_i[2] * L_i[4] * L_i[6]); // if (abs(Det) < 0.01) { // for (int i = 0; i < 9; i++) { // L_i[0 * 9 + i] = 0.0; // L_i[0 * 9 + 0] = 1; // L_i[0 * 9 + 4] = 1; // L_i[0 * 9 + 8] = 1; // } // } // printf("L Det %f\n", Det); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Shear_Stress_Rate(Real4* sortedPosRad, Real4* sortedRhoPreMu, Real3* sortedVelMas, Real3* velMas_ModifiedBCE, Real4* rhoPreMu_ModifiedBCE, Real3* sortedTauXxYyZz, Real3* sortedTauXyXzYz, Real3* sortedDerivTauXxYyZz, Real3* sortedDerivTauXyXzYz, uint* gridMarkerIndex, uint* cellStart, uint* cellEnd, const size_t numAllMarkers) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numAllMarkers) { return; } if (sortedRhoPreMu[index].w > -0.5) { return; } Real3 posRadA = mR3(sortedPosRad[index]); Real3 velMasA = sortedVelMas[index]; 
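        // NOTE (descriptive comment added for clarity): the remainder of this kernel
        // loads the particle's current stress state (tau_xx..tau_yz), builds the
        // kernel-gradient correction matrix G_i via calc_G_Matrix, and then walks
        // the 27 neighbouring grid cells. For every neighbour inside the support
        // radius it accumulates the SPH strain-rate tensor (exx..eyz) and spin
        // tensor (wxy, wxz, wyz), from which a Jaumann-type (co-rotational) stress
        // rate with shear modulus G_shear and bulk term K_bulk is formed and
        // written to sortedDerivTauXxYyZz / sortedDerivTauXyXzYz.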
Real hA = sortedPosRad[index].w; Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML; Real tauxx = sortedTauXxYyZz[index].x; Real tauyy = sortedTauXxYyZz[index].y; Real tauzz = sortedTauXxYyZz[index].z; Real tauxy = sortedTauXyXzYz[index].x; Real tauxz = sortedTauXyXzYz[index].y; Real tauyz = sortedTauXyXzYz[index].z; Real tauzx = tauxz; Real tauzy = tauyz; Real tauyx = tauxy; Real dTauxx = 0.0; Real dTauyy = 0.0; Real dTauzz = 0.0; Real dTauxy = 0.0; Real dTauxz = 0.0; Real dTauyz = 0.0; Real G_i[9] = {0.0}; calc_G_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,G_i,cellStart,cellEnd,numAllMarkers); // get address in grid int3 gridPos = calcGridPos(posRadA); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); uint startIndex = cellStart[gridHash]; uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { if (j != index) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d > SuppRadii) continue; Real3 velMasB = sortedVelMas[j]; Real4 rhoPresMuB = sortedRhoPreMu[j]; if (rhoPresMuB.w > -1.0) { int bceIndexB = gridMarkerIndex[j] - (numObjectsD.numFluidMarkers); if (!(bceIndexB >= 0 && bceIndexB < numObjectsD.numBoundaryMarkers + numObjectsD.numRigid_SphMarkers)) { printf("Error! bceIndex out of bound, collideCell !\n"); } rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB]; // to check velMasB = velMas_ModifiedBCE[bceIndexB]; // to check velMasB = 2.0*velMasB - velMasA; // noslip BC } Real rhoB = rhoPresMuB.x; Real hB = sortedPosRad[j].w; Real mB = paramsD.markerMass; Real3 gradW = GradWh(dist3, (hA + hB) * 0.5); Real3 gradW_new; gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z; gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z; gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z; gradW = gradW_new; // start to calculate the rate Real Gm = paramsD.G_shear; // shear modulus of the material Real half_mB_over_rhoB = 0.5 * (mB / rhoB); Real3 vAB = velMasA - velMasB; Real3 vAB_h = (velMasA - velMasB) * half_mB_over_rhoB; // entries of strain rate tensor Real exx = -2.0 * vAB_h.x * gradW.x; Real eyy = -2.0 * vAB_h.y * gradW.y; Real ezz = -2.0 * vAB_h.z * gradW.z; Real exy = -vAB_h.x * gradW.y - vAB_h.y * gradW.x; Real exz = -vAB_h.x * gradW.z - vAB_h.z * gradW.x; Real eyz = -vAB_h.y * gradW.z - vAB_h.z * gradW.y; // entries of rotation rate (spin) tensor // Real wxx = 0.0; // Real wyy = 0.0; // Real wzz = 0.0; Real wxy = -vAB_h.x * gradW.y + vAB_h.y * gradW.x; Real wxz = -vAB_h.x * gradW.z + vAB_h.z * gradW.x; Real wyz = -vAB_h.y * gradW.z + vAB_h.z * gradW.y; Real wyx = -wxy; // Real wzx = -wxz; Real wzy = -wyz; Real edia = 1.0 / 3.0 * (exx + eyy + ezz); Real twoGm = 2.0 * Gm; Real K_edia = paramsD.K_bulk*1.0*edia; dTauxx += twoGm * (exx - edia) + 2.0 * (tauxy * wxy + tauxz * wxz) + K_edia; dTauyy += twoGm * (eyy - edia) - 2.0 * (tauyx * wxy - tauyz * wyz) + K_edia; dTauzz += twoGm * (ezz - edia) - 2.0 * (tauzx * wxz + tauzy * wyz) + K_edia; dTauxy += twoGm * exy - (tauxx * wxy + tauxz * wzy) + (wxy * tauyy + wxz * tauzy); dTauxz += twoGm * exz - (tauxx * wxz + tauxy * wyz) + (wxy * tauyz + wxz * tauzz); dTauyz += twoGm * eyz - (tauyx * wxz + tauyy * wyz) + (wyx * tauxz + wyz * tauzz); } } } } } sortedDerivTauXxYyZz[index] = mR3(dTauxx, dTauyy, dTauzz); sortedDerivTauXyXzYz[index] = mR3(dTauxy, dTauxz, dTauyz); } 
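//--------------------------------------------------------------------------------------------------------------------------------
// Illustrative sketch (added for clarity, not part of the original ChFsiForceExplicitSPH code):
// for a single neighbour pair the Shear_Stress_Rate kernel above accumulates strain-rate
// and spin contributions of the form e = -(m_j/rho_j) * sym(vAB outer gradW) and
// w = -(m_j/rho_j) * asym(vAB outer gradW). The helper below restates exactly that
// pairwise contribution with plain host types; the struct and function names are
// assumptions introduced only for illustration.
struct PairStrainSpinSketch {
    double exx, eyy, ezz, exy, exz, eyz;   // symmetric strain-rate entries
    double wxy, wxz, wyz;                  // antisymmetric spin entries
};
inline PairStrainSpinSketch pairStrainSpinSketch(const double vABx, const double vABy, const double vABz,
                                                 const double gWx, const double gWy, const double gWz,
                                                 const double mB_over_rhoB) {
    const double h = 0.5 * mB_over_rhoB;   // same role as half_mB_over_rhoB in the kernel above
    PairStrainSpinSketch r;
    r.exx = -2.0 * h * vABx * gWx;
    r.eyy = -2.0 * h * vABy * gWy;
    r.ezz = -2.0 * h * vABz * gWz;
    r.exy = -h * (vABx * gWy + vABy * gWx);
    r.exz = -h * (vABx * gWz + vABz * gWx);
    r.eyz = -h * (vABy * gWz + vABz * gWy);
    r.wxy = -h * (vABx * gWy - vABy * gWx);
    r.wxz = -h * (vABx * gWz - vABz * gWx);
    r.wyz = -h * (vABy * gWz - vABz * gWy);
    return r;
}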
//-------------------------------------------------------------------------------------------------------------------------------- __global__ void calcRho_kernel(Real4* sortedPosRad, Real4* sortedRhoPreMu, Real4* sortedRhoPreMu_old, Real* _sumWij_rhoi, uint* cellStart, uint* cellEnd, const size_t numAllMarkers, int density_reinit, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } sortedRhoPreMu_old[i_idx].y = Eos(sortedRhoPreMu_old[i_idx].x, sortedRhoPreMu_old[i_idx].w); Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML; Real sum_mW = 0; Real sum_mW_rho = 0.0000001; Real sum_W = 0.0; // get address in grid int3 gridPos = calcGridPos(posRadA); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d > SuppRadii) continue; if (sortedRhoPreMu_old[j].w == -1) { // Real h_j = sortedPosRad[j].w; Real m_j = paramsD.markerMass; // cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real W3 = W3h_GPU(d, 0.5 * (h_j + h_i)); sum_mW += m_j * W3; sum_W += W3; sum_mW_rho += m_j * W3 / sortedRhoPreMu_old[j].x; } } } } } } // sumWij_inv[i_idx] = paramsD.markerMass / sum_mW; // sortedRhoPreMu[i_idx].x = sum_mW; if ((density_reinit == 0) && (sortedRhoPreMu[i_idx].w == -1)) sortedRhoPreMu[i_idx].x = sum_mW / sum_mW_rho; if ((sortedRhoPreMu[i_idx].x > 3 * paramsD.rho0 || sortedRhoPreMu[i_idx].x < 0.01 * paramsD.rho0) && sortedRhoPreMu[i_idx].w == -1) printf("(calcRho_kernel)density marker %d, sum_mW=%f, sum_W=%f, h_i=%f\n", i_idx, sum_mW, sum_W, h_i); } //-------------------------------------------------------------------------------------------------------------------------------- // modify pressure for body force __device__ __inline__ void modifyPressure(Real4& rhoPresMuB, const Real3& dist3Alpha) { // body force in x direction rhoPresMuB.y = (dist3Alpha.x > 0.5 * paramsD.boxDims.x) ? (rhoPresMuB.y - paramsD.deltaPress.x) : rhoPresMuB.y; rhoPresMuB.y = (dist3Alpha.x < -0.5 * paramsD.boxDims.x) ? (rhoPresMuB.y + paramsD.deltaPress.x) : rhoPresMuB.y; // body force in x direction rhoPresMuB.y = (dist3Alpha.y > 0.5 * paramsD.boxDims.y) ? (rhoPresMuB.y - paramsD.deltaPress.y) : rhoPresMuB.y; rhoPresMuB.y = (dist3Alpha.y < -0.5 * paramsD.boxDims.y) ? (rhoPresMuB.y + paramsD.deltaPress.y) : rhoPresMuB.y; // body force in x direction rhoPresMuB.y = (dist3Alpha.z > 0.5 * paramsD.boxDims.z) ? (rhoPresMuB.y - paramsD.deltaPress.z) : rhoPresMuB.y; rhoPresMuB.y = (dist3Alpha.z < -0.5 * paramsD.boxDims.z) ? 
(rhoPresMuB.y + paramsD.deltaPress.z) : rhoPresMuB.y; } //-------------------------------------------------------------------------------------------------------------------------------- __device__ inline Real3 CubicSolve(Real aa, Real bb, Real cc, Real dd) { Real disc, q, r, dum1, dum2, term1, r13; bb /= aa; cc /= aa; dd /= aa; if (aa == 0) { return mR3(0, 0, 0); } if (abs(bb) < 1e-9) { return mR3(0, 0, 0); } if (abs(cc) < 1e-9) { return mR3(0, 0, 0); } if (abs(dd) < 1e-9) { return mR3(0, 0, 0); } q = (3.0 * cc - (bb * bb)) / 9.0; r = -(27.0 * dd) + bb * (9.0 * cc - 2.0 * (bb * bb)); r /= 54.0; disc = q * q * q + r * r; term1 = (bb / 3.0); /* dataForm.x1Im.value = 0; //The first root is always real. if (disc > 0) { // one root real, two are complex s = r + Math.sqrt(disc); s = ((s < 0) ? -Math.pow(-s, (1.0/3.0)) : Math.pow(s, (1.0/3.0))); t = r - Math.sqrt(disc); t = ((t < 0) ? -Math.pow(-t, (1.0/3.0)) : Math.pow(t, (1.0/3.0))); dataForm.x1Re.value = -term1 + s + t; term1 += (s + t)/2.0; dataForm.x3Re.value = dataForm.x2Re.value = -term1; term1 = Math.sqrt(3.0)*(-t + s)/2; dataForm.x2Im.value = term1; dataForm.x3Im.value = -term1; return; } // End if (disc > 0) // The remaining options are all real dataForm.x3Im.value = dataForm.x2Im.value = 0; if (disc == 0){ // All roots real, at least two are equal. r13 = ((r < 0) ? -Math.pow(-r,(1.0/3.0)) : Math.pow(r,(1.0/3.0))); dataForm.x1Re.value = -term1 + 2.0*r13; dataForm.x3Re.value = dataForm.x2Re.value = -(r13 + term1); return; } // End if (disc == 0) */ Real xRex, xRey, xRez; // have complex root if (disc > 0) { xRex = 0.0; xRey = 0.0; xRez = 0.0; return mR3(xRex, xRey, xRez); } // All roots real, at least two are equal. if (disc == 0) { if (r < 0) { r13 = pow(-r, (1.0 / 3.0)); } else { r13 = pow(r, (1.0 / 3.0)); } xRex = -term1 + 2.0 * r13; xRey = -(r13 + term1); xRez = xRey; return mR3(xRex, xRey, xRez); } // All roots are real and unequal (to get here, q < 0) q = -q; dum1 = q * q * q; dum2 = r / (sqrt(dum1 + 1.0e-9)); if ((dum2 >= 0) && (dum2 <= 1)) { dum1 = acos(dum2); } else { xRex = 0.0; xRey = 0.0; xRez = 0.0; return mR3(xRex, xRey, xRez); } r13 = 2.0 * sqrt(q); xRex = -term1 + r13 * cos(dum1 / 3.0); xRey = -term1 + r13 * cos((dum1 + 2.0 * 3.1415926) / 3.0); xRez = -term1 + r13 * cos((dum1 + 4.0 * 3.1415926) / 3.0); return mR3(xRex, xRey, xRez); } __device__ inline Real3 CubicEigen(Real4 c1, Real4 c2, Real4 c3) { Real a = c1.x; Real b = c1.y; Real c = c1.z; Real d = c1.w; Real l = c2.x; Real m = c2.y; Real n = c2.z; Real k = c2.w; Real p = c3.x; Real q = c3.y; Real r = c3.z; Real s = c3.w; Real D = (a * m * r + b * p * n + c * l * q) - (a * n * q + b * l * r + c * m * p) + 1.0e-9; Real x = ((b * r * k + c * m * s + d * n * q) - (b * n * s + c * q * k + d * m * r)) / D; Real y = ((a * n * s + c * p * k + d * l * r) - (a * r * k + c * l * s + d * n * p)) / D; Real z = ((a * q * k + b * l * s + d * m * p) - (a * m * s + b * p * k + d * l * q)) / D; b = b + 1.0e-9; x = 1.0e0; z = (-l + a * m / b) / (n - c * m / b); y = (-a - c * z) / b; Real R = sqrt(x * x + y * y + z * z); x = x / R; y = y / R; z = z / R; // if(abs(D) < 1){ // return mR3(0,0,0); // } // if(abs(m) < 0.1){ // x=0; // y=1; // z=0; // return mR3(x,y,z); // } // else{ // y=0; // if(abs(c) > 0.1){ // x=1; // z=-a/c; // return mR3(x,y,z); // } // if(abs(a) > 0.1){ // z=1; // x=-c/a; // return mR3(x,y,z); // } // } return mR3(x, y, z); } //-------------------------------------------------------------------------------------------------------------------------------- /** * @brief 
DifVelocityRho * @details See SDKCollisionSystem.cuh */ __device__ inline Real4 DifVelocityRho(float G_i[9], Real3 dist3, Real d, Real4 posRadA, Real4 posRadB, Real3 velMasA, Real3 vel_XSPH_A, Real3 velMasB, Real3 vel_XSPH_B, Real4 rhoPresMuA, Real4 rhoPresMuB, Real multViscosity) { Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5); // Real3 gradW_new; // gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z; // gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z; // gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z; // gradW = gradW_new; // Real vAB_Dot_rAB = dot(velMasA - velMasB, dist3); // // // //*** Artificial viscosity type 1.1 // Real alpha = .001; // Real c_ab = 10 * paramsD.v_Max; // Ma = .1;//sqrt(7.0f * 10000 / // // ((rhoPresMuA.x + rhoPresMuB.x) / 2.0f)); // // Real h = paramsD.HSML; // Real rho = .5f * (rhoPresMuA.x + rhoPresMuB.x); // Real nu = alpha * paramsD.HSML * c_ab / rho; // // //*** Artificial viscosity type 1.2 // // Real nu = 22.8f * paramsD.mu0 / 2.0f / (rhoPresMuA.x * rhoPresMuB.x); // Real3 derivV = -paramsD.markerMass * // (rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) + rhoPresMuB.y / (rhoPresMuB.x * rhoPresMuB.x) - // nu * vAB_Dot_rAB / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML)) * // gradW; // return mR4(derivV, rhoPresMuA.x * paramsD.markerMass / rhoPresMuB.x * dot(vel_XSPH_A - vel_XSPH_B, gradW)); //*** Artificial viscosity type 2 if (rhoPresMuA.w > -1 && rhoPresMuB.w > -1) return mR4(0.0); Real rAB_Dot_GradWh = dot(dist3, gradW); Real rAB_Dot_GradWh_OverDist = rAB_Dot_GradWh / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML); Real3 derivV = - paramsD.markerMass *(rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) + rhoPresMuB.y / (rhoPresMuB.x * rhoPresMuB.x)) * gradW + paramsD.markerMass * (8.0f * multViscosity) * paramsD.mu0 * rAB_Dot_GradWh_OverDist * (velMasA - velMasB) / square(rhoPresMuA.x + rhoPresMuB.x); // Real derivRho = rhoPresMuA.x * paramsD.markerMass / rhoPresMuB.x * dot(vel_XSPH_A - vel_XSPH_B, gradW); // Real zeta = 0;//.05;//.1; // Real derivRho = rhoPresMuA.x * paramsD.markerMass * invrhoPresMuBx * //(dot(vel_XSPH_A - vel_XSPH_B, gradW) // + zeta * paramsD.HSML * (10 * paramsD.v_Max) * 2 * (rhoPresMuB.x /// rhoPresMuA.x - 1) * // rAB_Dot_GradWh_OverDist // ); //-------------------------------- // Ferrari Modification Real derivRho = paramsD.markerMass * dot(vel_XSPH_A - vel_XSPH_B, gradW); // Real cA = FerrariCi(rhoPresMuA.x); // Real cB = FerrariCi(rhoPresMuB.x); // derivRho += rAB_Dot_GradWh / (d + paramsD.epsMinMarkersDis * paramsD.HSML) * max(cA, cB) / rhoPresMuB.x * // (rhoPresMuB.x - rhoPresMuA.x); //*** Artificial viscosity Real vAB_Dot_rAB = dot(velMasA - velMasB, dist3); // if (vAB_Dot_rAB < 0.0) { // if ((rhoPresMuA.w < - 0.5) && (rhoPresMuB.w < - 0.5)){ // only for fluid particles Real alpha = 0.0;//paramsD.Ar_vis_alpha; Real c_ab = paramsD.Cs; Real rho = 0.5f * (rhoPresMuA.x * rhoPresMuB.x); Real nu = -alpha * paramsD.HSML * c_ab / rho; Real derivM1 = -paramsD.markerMass * (nu * vAB_Dot_rAB / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML)); derivV.x += derivM1 * gradW.x; derivV.y += derivM1 * gradW.y; derivV.z += derivM1 * gradW.z; // } // } // -------------------------------- return mR4(derivV, derivRho); // //*** Artificial viscosity type 1.3 // Real rAB_Dot_GradWh = dot(dist3, gradW); // Real3 derivV = -paramsD.markerMass * // (rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) + rhoPresMuB.y / (rhoPresMuB.x * // rhoPresMuB.x)) * gradW + 
// paramsD.markerMass / (rhoPresMuA.x * rhoPresMuB.x) * 2.0f * paramsD.mu0 * rAB_Dot_GradWh / // (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML) * (velMasA - velMasB); // return mR4(derivV, rhoPresMuA.x * paramsD.markerMass / rhoPresMuB.x * dot(vel_XSPH_A - vel_XSPH_B, gradW)); } /// Only for modelling elastic and granular problems __device__ inline Real4 DifVelocityRho_ElasticSPH(Real3 gradW, Real3 dist3, Real d, Real invd, Real4 posRadA, Real4 posRadB, Real3 velMasA_in, Real3 vel_XSPH_A_in, Real3 velMasB_in, Real3 vel_XSPH_B_in, Real4 rhoPresMuA, Real4 rhoPresMuB, Real multViscosity, Real3 tauXxYyZz_A_in, Real3 tauXyXzYz_A_in, Real3 tauXxYyZz_B_in, Real3 tauXyXzYz_B_in) { // if (rhoPresMuA.w > -1 ) // return mR4(0.0); // if (rhoPresMuB.w > -1 ) // return mR4(0.0); Real3 velMasA = velMasA_in; Real3 velMasB = velMasB_in; Real3 vel_XSPH_A = vel_XSPH_A_in; Real3 vel_XSPH_B = vel_XSPH_B_in; Real3 tauXxYyZz_A = tauXxYyZz_A_in; Real3 tauXxYyZz_B = tauXxYyZz_B_in; Real3 tauXyXzYz_A = tauXyXzYz_A_in; Real3 tauXyXzYz_B = tauXyXzYz_B_in; if (rhoPresMuA.w > -0.5 && rhoPresMuB.w > -0.5) return mR4(0.0); if (rhoPresMuA.w < -0.5 && rhoPresMuB.w > -0.5){ tauXxYyZz_B = tauXxYyZz_A; tauXyXzYz_B = tauXyXzYz_A; vel_XSPH_B = 2.0*vel_XSPH_B - vel_XSPH_A; // noslip BC // velMasB = 2.0*velMasB - velMasA; // noslip BC } if (rhoPresMuA.w > -0.5 && rhoPresMuB.w < -0.5){ tauXxYyZz_A = tauXxYyZz_B; tauXyXzYz_A = tauXyXzYz_B; } Real txxA = tauXxYyZz_A.x; Real tyyA = tauXxYyZz_A.y; Real tzzA = tauXxYyZz_A.z; Real txyA = tauXyXzYz_A.x; Real txzA = tauXyXzYz_A.y; Real tyzA = tauXyXzYz_A.z; Real txxB = tauXxYyZz_B.x; Real tyyB = tauXxYyZz_B.y; Real tzzB = tauXxYyZz_B.z; Real txyB = tauXyXzYz_B.x; Real txzB = tauXyXzYz_B.y; Real tyzB = tauXyXzYz_B.z; // Real PA = rhoPresMuA.y; // Real PB = rhoPresMuB.y; // Real rhoA = rhoPresMuA.x; // Real rhoB = rhoPresMuB.x; // Real rhoA2 = rhoA * rhoA; // Real rhoB2 = rhoB * rhoB; Real Mass = paramsD.markerMass; Real MassOverRhoA2 = Mass * paramsD.invrho0 * paramsD.invrho0;//Mass/rhoA2; Real MassOverRhoB2 = MassOverRhoA2;//Mass/rhoB2; Real3 MA_gradW = gradW * MassOverRhoA2; Real3 MB_gradW = gradW * MassOverRhoB2; Real derivVx = //-Mass * (PA / (rhoA * rhoA) + PB / (rhoB * rhoB)) * gradW.x + (txxA * MA_gradW.x + txyA * MA_gradW.y + txzA * MA_gradW.z) + (txxB * MB_gradW.x + txyB * MB_gradW.y + txzB * MB_gradW.z) ; Real derivVy = //-Mass * (PA / (rhoA * rhoA) + PB / (rhoB * rhoB)) * gradW.y + (txyA * MA_gradW.x + tyyA * MA_gradW.y + tyzA * MA_gradW.z) + (txyB * MB_gradW.x + tyyB * MB_gradW.y + tyzB * MB_gradW.z) ; Real derivVz = //-Mass * (PA / (rhoA * rhoA) + PB / (rhoB * rhoB)) * gradW.z + (txzA * MA_gradW.x + tyzA * MA_gradW.y + tzzA * MA_gradW.z) + (txzB * MB_gradW.x + tyzB * MB_gradW.y + tzzB * MB_gradW.z) ; // TODO: visco-plastic model // Real vel = length(velMasA); // if(vel > 0.3){ // Real rAB_Dot_GradWh = dot(dist3, gradW); // Real rAB_Dot_GradWh_OverDist = rAB_Dot_GradWh / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML); // Real3 derivV = - paramsD.markerMass *(rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) + rhoPresMuB.y / (rhoPresMuB.x * rhoPresMuB.x)) * gradW // + paramsD.markerMass * (8.0f * multViscosity) * paramsD.mu_fric_s // * pow(rhoPresMuA.x + rhoPresMuB.x, Real(-2)) * rAB_Dot_GradWh_OverDist * (velMasA - velMasB); // derivVx = derivV.x; // derivVy = derivV.y; // derivVz = derivV.z; // } //*** Artificial viscosity Real vAB_Dot_rAB = dot(velMasA - velMasB, dist3); // if (vAB_Dot_rAB < 0.0) { // if ((rhoPresMuA.w < - 0.5) && (rhoPresMuB.w 
< - 0.5)){ // only for fluid particles Real alpha = paramsD.Ar_vis_alpha; Real c_ab = paramsD.Cs; // Real rho = 0.5f * (rhoA + rhoB); Real nu = -alpha * paramsD.HSML * c_ab * paramsD.invrho0; Real derivM1 = -Mass * (nu * vAB_Dot_rAB * (invd * invd));//+ paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML derivVx += derivM1 * gradW.x; derivVy += derivM1 * gradW.y; derivVz += derivM1 * gradW.z; // } // } // damping force /*if (1 == 0) { Real xi0 = paramsD.Vis_Dam; Real E0 = paramsD.E_young; Real h0 = paramsD.HSML; Real Cd = xi0 * sqrt(E0 / (rhoA * h0 * h0)); derivVx -= Cd * velMasA.x; derivVy -= Cd * velMasA.y; derivVz -= Cd * velMasA.z; }*/ // Real derivRho = Mass * dot(vel_XSPH_A - vel_XSPH_B, gradW); return mR4(derivVx, derivVy, derivVz, 0.0); } //-------------------------------------------------------------------------------------------------------------------------------- __device__ inline Real3 GradientOperator( float G_i[9], Real3 dist3, Real4 posRadA, Real4 posRadB, Real fA, Real fB, Real4 rhoPresMuA, Real4 rhoPresMuB) { Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5); Real3 gradW_new; gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z; gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z; gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z; Real Vol = paramsD.markerMass/rhoPresMuB.x; Real fji = fB - fA; Real Gra_ij_x = fji*gradW_new.x * Vol; Real Gra_ij_y = fji*gradW_new.y * Vol; Real Gra_ij_z = fji*gradW_new.z * Vol; return mR3(Gra_ij_x, Gra_ij_y, Gra_ij_z); } //-------------------------------------------------------------------------------------------------------------------------------- __device__ inline Real4 LaplacianOperator( float G_i[9], float L_i[9], Real3 dist3, Real4 posRadA, Real4 posRadB, Real fA, Real fB, Real4 rhoPresMuA, Real4 rhoPresMuB) { Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5); Real d = length(dist3); Real3 eij = dist3/d; Real Vol = paramsD.markerMass/rhoPresMuB.x; Real fij = fA - fB; Real ex_Gwx = eij.x*gradW.x; Real ex_Gwy = eij.x*gradW.y; Real ex_Gwz = eij.x*gradW.z; Real ey_Gwx = eij.y*gradW.x; Real ey_Gwy = eij.y*gradW.y; Real ey_Gwz = eij.y*gradW.z; Real ez_Gwx = eij.z*gradW.x; Real ez_Gwy = eij.z*gradW.y; Real ez_Gwz = eij.z*gradW.z; Real Part1 = L_i[0]*ex_Gwx + L_i[1]*ex_Gwy + L_i[2]*ex_Gwz + L_i[3]*ey_Gwx + L_i[4]*ey_Gwy + L_i[5]*ey_Gwz + L_i[6]*ez_Gwx + L_i[7]*ez_Gwy + L_i[8]*ez_Gwz; Real Part2 = fij/d * Vol; Real3 Part3 = mR3(-eij.x, -eij.y, -eij.z) * Vol; return mR4(2.0*Part1*Part2, Part3.x*(2.0*Part1), Part3.y*(2.0*Part1), Part3.z*(2.0*Part1)); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void EOS(Real4* sortedRhoPreMu, uint numAllMarkers, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numAllMarkers) return; sortedRhoPreMu[index].y = Eos(sortedRhoPreMu[index].x, sortedRhoPreMu[index].w); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Navier_Stokes(Real4* sortedDerivVelRho, Real3* shift_r, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real3* velMas_ModifiedBCE, Real4* rhoPreMu_ModifiedBCE, Real3* sortedTauXxYyZz, Real3* sortedTauXyXzYz, uint* gridMarkerIndex, uint* cellStart, uint* cellEnd, const size_t numAllMarkers, Real MaxVel, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= 
numAllMarkers) return; if (sortedRhoPreMu[index].w > -0.5 && sortedRhoPreMu[index].w < 0.5){ sortedDerivVelRho[index] = mR4(0.0); return; } Real3 posRadA = mR3(sortedPosRad[index]); Real3 velMasA = sortedVelMas[index]; Real4 rhoPresMuA = sortedRhoPreMu[index]; Real4 derivVelRho = mR4(0.0); Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML; Real G_i[9] = {0.0}; Real A_i[27] = {0.0}; Real L_i[9] = {0.0}; calc_G_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,G_i,cellStart,cellEnd,numAllMarkers); if(!paramsD.elastic_SPH){ calc_A_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,A_i,G_i,cellStart,cellEnd,numAllMarkers); calc_L_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,A_i,L_i,G_i,cellStart,cellEnd,numAllMarkers); } float Gi[9] = {1.0,0.0,0.0, 0.0,1.0,0.0, 0.0,0.0,1.0}; float Li[9] = {1.0,0.0,0.0, 0.0,1.0,0.0, 0.0,0.0,1.0}; Gi[0] = G_i[0]; Gi[1] = G_i[1]; Gi[2] = G_i[2]; Gi[3] = G_i[3]; Gi[4] = G_i[4]; Gi[5] = G_i[5]; Gi[6] = G_i[6]; Gi[7] = G_i[7]; Gi[8] = G_i[8]; Li[0] = L_i[0]; Li[1] = L_i[1]; Li[2] = L_i[2]; Li[3] = L_i[3]; Li[4] = L_i[4]; Li[5] = L_i[5]; Li[6] = L_i[6]; Li[7] = L_i[7]; Li[8] = L_i[8]; // Real3 posGra = mR3(0.0); // Real4 posLap = mR4(0.0); Real3 preGra = mR3(0.0); Real3 velxGra = mR3(0.0); Real3 velyGra = mR3(0.0); Real3 velzGra = mR3(0.0); Real4 velxLap = mR4(0.0); Real4 velyLap = mR4(0.0); Real4 velzLap = mR4(0.0); Real radii = paramsD.MULT_INITSPACE * paramsD.HSML*1.241;//1.129;//1.241 Real invRadii = 1.0/radii; Real3 v_ab = (velMasA + velMasA)*0.5; Real v_ab_m = length(v_ab); Real bsvdT = paramsD.beta_shifting * v_ab_m * paramsD.dT ; // Real3 numeratorXxYyZz = mR3(0.0); // Real3 numeratorXyXzYz = mR3(0.0); // Real denominator = 1e-9; // get address in grid int3 gridPos = calcGridPos(posRadA); Real3 inner_sum = mR3(0.0); // Real mi_bar = 0.0, r0 = 0.0; Real sum_w_i = W3h_GPU(0.0, sortedPosRad[index].w) * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE); int N_ = 1; int N_s = 0; for (int x = -1; x <= 1; x++) { for (int y = -1; y <= 1; y++) { for (int z = -1; z <= 1; z++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); uint startIndex = cellStart[gridHash]; uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { if (j != index) { Real3 posRadB = mR3(sortedPosRad[j]); // Real3 dist3Alpha = posRadA - posRadB; Real3 dist3 = Distance(posRadA, posRadB); // change from B-A to A-B Real d = length(dist3); if (d > SuppRadii) continue; Real4 rhoPresMuB = sortedRhoPreMu[j]; if (rhoPresMuA.w > -.1 && rhoPresMuB.w > -.1) { // no rigid-rigid force continue; } Real invd = 1.0 / d; // modifyPressure(rhoPresMuB, dist3Alpha); // if (!(isfinite(rhoPresMuB.x) && isfinite(rhoPresMuB.y) && isfinite(rhoPresMuB.z))) { // printf("Error! particle rhoPresMuB is NAN: thrown from modifyPressure !\n"); // } Real3 velMasB = sortedVelMas[j]; if (rhoPresMuB.w > -1.0) { int bceIndexB = gridMarkerIndex[j] - (numObjectsD.numFluidMarkers); // if (!(bceIndexB >= 0 && // bceIndexB < numObjectsD.numBoundaryMarkers + numObjectsD.numRigid_SphMarkers)) { // printf("Error! bceIndex out of bound, collideCell !\n"); // } rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB]; velMasB = velMas_ModifiedBCE[bceIndexB]; } Real multViscosit = 1; // if (!(isfinite(rhoPresMuB.x) && isfinite(rhoPresMuB.y) && isfinite(rhoPresMuB.z))) { // printf("Error! particle rhoPresMuB is NAN: thrown from collideCell ! 
type=%f\n", // rhoPresMuB.w); // } // change from "-=" to "+=" if(paramsD.elastic_SPH){ Real3 gradW = GradWh(dist3, paramsD.HSML); Real3 gradW_new; gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z; gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z; gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z; gradW = gradW_new; derivVelRho += DifVelocityRho_ElasticSPH(gradW, dist3, d, invd, sortedPosRad[index], sortedPosRad[j], velMasA, velMasA, velMasB, velMasB, rhoPresMuA, rhoPresMuB, multViscosit, sortedTauXxYyZz[index], sortedTauXyXzYz[index], sortedTauXxYyZz[j], sortedTauXyXzYz[j]); } else{ derivVelRho += DifVelocityRho(Gi, dist3, d, sortedPosRad[index], sortedPosRad[j], velMasA, velMasA, velMasB, velMasB, rhoPresMuA, rhoPresMuB, multViscosit); preGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j], -rhoPresMuA.y, rhoPresMuB.y, rhoPresMuA, rhoPresMuB); velxGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j], velMasA.x, velMasB.x, rhoPresMuA, rhoPresMuB); velyGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j], velMasA.y, velMasB.y, rhoPresMuA, rhoPresMuB); velzGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j], velMasA.z, velMasB.z, rhoPresMuA, rhoPresMuB); velxLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j], velMasA.x, velMasB.x, rhoPresMuA, rhoPresMuB); velyLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j], velMasA.y, velMasB.y, rhoPresMuA, rhoPresMuB); velzLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j], velMasA.z, velMasB.z, rhoPresMuA, rhoPresMuB); } if (d > paramsD.HSML*1.0e-9 && sum_w_i < paramsD.C_Wi) { // Real m_j = cube(sortedPosRad[j].w * paramsD.MULT_INITSPACE) * paramsD.rho0; // mi_bar += m_j; // r0 += d; // inner_sum += m_j * dist3 / (d * d * d); sum_w_i = sum_w_i + W3h_GPU(d, sortedPosRad[index].w) * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE); N_ = N_ + 1; } // find particles that have contact with this particle if(N_s < 12 && d < 2.0*radii){ Real Pen = (radii - d) * invRadii; Real3 r_0 = bsvdT * invd * dist3 ; Real3 r_s = r_0 * Pen; if (d < 1.0*radii) { inner_sum += 3.0*r_s; N_s = N_s + 1; } else if (d < 1.1*radii) { inner_sum += 1.0*r_s; N_s = N_s + 1; } else { inner_sum += 0.1 * 1.0 * (-r_0); N_s = N_s + 1; } } // posGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j], // square(posRadA.x), square(posRadB.x), rhoPresMuA, rhoPresMuB); // posLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j], // square(posRadA.x), square(posRadB.x), rhoPresMuA, rhoPresMuB); } } } } } if(paramsD.elastic_SPH){ if(sum_w_i < paramsD.C_Wi){ derivVelRho.w = -1.0; } else{ derivVelRho.w = 1.0; } } if(!paramsD.elastic_SPH){ Real nu = paramsD.mu0/paramsD.rho0; Real dvxdt = -preGra.x/rhoPresMuA.x + (velxLap.x + velxGra.x*velxLap.y + velxGra.y*velxLap.z + velxGra.z*velxLap.w) * nu; Real dvydt = -preGra.y/rhoPresMuA.x + (velyLap.x + velyGra.x*velyLap.y + velyGra.y*velyLap.z + velyGra.z*velyLap.w) * nu; Real dvzdt = -preGra.z/rhoPresMuA.x + (velzLap.x + velzGra.x*velzLap.y + velzGra.y*velzLap.z + velzGra.z*velzLap.w) * nu; Real drhodt = -paramsD.rho0*(velxGra.x + velyGra.y + velzGra.z); Real Det_G = (Gi[0] * Gi[4] * Gi[8] - Gi[0] * Gi[5] * Gi[7] - Gi[1] * Gi[3] * Gi[8] + Gi[1] * Gi[5] * Gi[6] + Gi[2] * Gi[3] * Gi[7] - Gi[2] * Gi[4] * Gi[6]); Real Det_L = (Li[0] * Li[4] * Li[8] - Li[0] * Li[5] * Li[7] - Li[1] * Li[3] * Li[8] + Li[1] * Li[5] * Li[6] + 
Li[2] * Li[3] * Li[7] - Li[2] * Li[4] * Li[6]); if(rhoPresMuA.w == -1){ if( Det_G > 0.9 && Det_G < 1.1 && Det_L > 0.9 && Det_L < 1.1 && sum_w_i > 0.9){ // printf("Det_G, Det_L %f %f %f %f %f %d\n", Det_G, Det_L, posRadA.x, posRadA.y, posRadA.z, N_); derivVelRho = mR4(dvxdt, dvydt, dvzdt, drhodt); } // Real dvdt =length(mR3(derivVelRho)); // Real coeff = 1000.0/dvdt; // if(dvdt > 1000.0){ // derivVelRho = mR4(dvxdt * coeff, dvydt * coeff, dvzdt * coeff, 0.0); // derivVelRho = mR4(0.0); // } } } if (!(isfinite(derivVelRho.x) && isfinite(derivVelRho.y) && isfinite(derivVelRho.z))) { printf("Error! particle derivVel is NAN: thrown from ChFsiForceExplicitSPH.cu, collideD !\n"); *isErrorD = true; } if (!(isfinite(derivVelRho.w))) { printf("Error! particle derivRho is NAN: thrown from ChFsiForceExplicitSPH.cu, collideD !\n"); *isErrorD = true; } // add gravity and other body force to fluid markers if (rhoPresMuA.w > -1.5 && rhoPresMuA.w < -0.5){ Real3 totalFluidBodyForce3 = paramsD.bodyForce3 + paramsD.gravity; derivVelRho += mR4(totalFluidBodyForce3); } sortedDerivVelRho[index] = derivVelRho; // r0 /= N_; // mi_bar /= N_; // if (sum_w_i > 0.95 && sortedRhoPreMu[index].w < -0.5 ) // shift_r[index] = paramsD.beta_shifting * r0 * r0 * MaxVel * paramsD.dT * inner_sum / (mi_bar+1e-9); // else // shift_r[index] = mR3(0.0); Real det_r_max = length(0.05*velMasA*paramsD.dT); Real det_r_A = length(inner_sum); if(det_r_A < det_r_max){ shift_r[index] = inner_sum; } else{ shift_r[index] = inner_sum * det_r_max/(det_r_A + 1e-9); } // shift_r[index] = mR3(0.0); // shift_r[index].y = 0.0; // if (sum_w_i < 0.95 && sortedRhoPreMu[index].w < -0.5) // printf("Finished in %f %f %f %f %f\n", sum_w_i, sortedPosRad[index].x, sortedPosRad[index].y, sortedPosRad[index].z, sortedRhoPreMu[index].w); } __global__ void NS_SSR( Real4* sortedDerivVelRho, Real3* sortedDerivTauXxYyZz, Real3* sortedDerivTauXyXzYz, Real3* shift_r, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real3* velMas_ModifiedBCE, Real4* rhoPreMu_ModifiedBCE, Real3* sortedTauXxYyZz, Real3* sortedTauXyXzYz, uint* gridMarkerIndex, uint* cellStart, uint* cellEnd, const size_t numAllMarkers, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numAllMarkers) return; if (sortedRhoPreMu[index].w > -0.5 && sortedRhoPreMu[index].w < 0.5){ sortedDerivVelRho[index] = mR4(0.0); sortedDerivTauXxYyZz[index] = mR3(0.0); sortedDerivTauXyXzYz[index] = mR3(0.0); return; } Real3 posRadA = mR3(sortedPosRad[index]); Real3 velMasA = sortedVelMas[index]; Real4 rhoPresMuA = sortedRhoPreMu[index]; Real hA = sortedPosRad[index].w; Real4 derivVelRho = mR4(0.0); Real3 deltaV = mR3(0); Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML; uint j_list[150]; uint j_num = 0; // Get address in grid int3 gridPos = calcGridPos(posRadA); for (int x = -1; x <= 1; x++) { for (int y = -1; y <= 1; y++) { for (int z = -1; z <= 1; z++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); uint startIndex = cellStart[gridHash]; uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { if (j != index) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d < SuppRadii){ j_list[j_num] = j; j_num++; } } } } } } Real tauxx = sortedTauXxYyZz[index].x; Real tauyy = sortedTauXxYyZz[index].y; Real tauzz = sortedTauXxYyZz[index].z; Real tauxy = sortedTauXyXzYz[index].x; Real tauxz = sortedTauXyXzYz[index].y; Real tauyz = 
sortedTauXyXzYz[index].z; Real tauzx = tauxz; Real tauzy = tauyz; Real tauyx = tauxy; Real dTauxx = 0.0; Real dTauyy = 0.0; Real dTauzz = 0.0; Real dTauxy = 0.0; Real dTauxz = 0.0; Real dTauyz = 0.0; Real G_i[9] = {1.0,0.0,0.0, 0.0,1.0,0.0, 0.0,0.0,1.0}; { Real mGi[9] = {0.0}; for(uint n = 0; n < j_num; n++){ uint j = j_list[n]; Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); if (d > SuppRadii || sortedRhoPreMu[j].w <= -2) continue; Real3 grad_i_wij = GradWh(rij, hA); Real3 grw_vj = grad_i_wij * paramsD.volume0; mGi[0] -= rij.x * grw_vj.x; mGi[1] -= rij.x * grw_vj.y; mGi[2] -= rij.x * grw_vj.z; mGi[3] -= rij.y * grw_vj.x; mGi[4] -= rij.y * grw_vj.y; mGi[5] -= rij.y * grw_vj.z; mGi[6] -= rij.z * grw_vj.x; mGi[7] -= rij.z * grw_vj.y; mGi[8] -= rij.z * grw_vj.z; } Real Det = (mGi[0] * mGi[4] * mGi[8] - mGi[0] * mGi[5] * mGi[7] - mGi[1] * mGi[3] * mGi[8] + mGi[1] * mGi[5] * mGi[6] + mGi[2] * mGi[3] * mGi[7] - mGi[2] * mGi[4] * mGi[6]); if (abs(Det) > 0.01) { Real OneOverDet = 1.0 / Det; G_i[0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) * OneOverDet; G_i[1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) * OneOverDet; G_i[2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) * OneOverDet; G_i[3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) * OneOverDet; G_i[4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) * OneOverDet; G_i[5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) * OneOverDet; G_i[6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) * OneOverDet; G_i[7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) * OneOverDet; G_i[8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) * OneOverDet; } } Real radii = paramsD.MULT_INITSPACE * paramsD.HSML*1.241;//1.129;//1.241 Real invRadii = 1.0/radii; Real3 v_ab = (velMasA + velMasA)*0.5; Real v_ab_m = length(v_ab); Real bsvdT = paramsD.beta_shifting * v_ab_m * paramsD.dT ; Real3 inner_sum = mR3(0.0); Real sum_w_i = W3h_GPU(0.0, sortedPosRad[index].w) * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE); int N_ = 1; int N_s = 0; for(uint n = 0; n < j_num; n++){ uint j = j_list[n]; Real3 posRadB = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d > SuppRadii) continue; Real4 rhoPresMuB = sortedRhoPreMu[j]; if (rhoPresMuA.w > -.1 && rhoPresMuB.w > -.1) { // no rigid-rigid force continue; } Real invd = 1.0 / d; Real3 velMasB = sortedVelMas[j]; if (rhoPresMuB.w > -1.0) { int bceIndexB = gridMarkerIndex[j] - (numObjectsD.numFluidMarkers); rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB]; velMasB = velMas_ModifiedBCE[bceIndexB]; } Real multViscosit = 1; // For granular material dynamics // Real rhoB = rhoPresMuB.x; Real hB = sortedPosRad[j].w; // Real mB = paramsD.markerMass; Real3 gradW = GradWh(dist3, (hA + hB) * 0.5); Real3 gradW_new; gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z; gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z; gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z; gradW = gradW_new; derivVelRho += DifVelocityRho_ElasticSPH(gradW, dist3, d, invd, sortedPosRad[index], sortedPosRad[j], velMasA, velMasA, velMasB, velMasB, rhoPresMuA, rhoPresMuB, multViscosit, sortedTauXxYyZz[index], sortedTauXyXzYz[index], sortedTauXxYyZz[j], sortedTauXyXzYz[j]); if(sortedRhoPreMu[index].w < -0.5){ // start to calculate the stress rate Real Gm = paramsD.G_shear; // shear modulus of the material Real half_mB_over_rhoB = 0.5 * paramsD.volume0; //(mB / rhoB); Real3 velMasB_new = velMasB; if (rhoPresMuB.w > -1.0) velMasB_new = 2.0*velMasB - velMasA; // noslip BC Real3 vAB = velMasA - velMasB_new; Real3 vAB_h 
= vAB * half_mB_over_rhoB; // entries of strain rate tensor Real exx = -2.0 * vAB_h.x * gradW.x; Real eyy = -2.0 * vAB_h.y * gradW.y; Real ezz = -2.0 * vAB_h.z * gradW.z; Real exy = -vAB_h.x * gradW.y - vAB_h.y * gradW.x; Real exz = -vAB_h.x * gradW.z - vAB_h.z * gradW.x; Real eyz = -vAB_h.y * gradW.z - vAB_h.z * gradW.y; // entries of rotation rate (spin) tensor // Real wxx = 0.0; // Real wyy = 0.0; // Real wzz = 0.0; Real wxy = -vAB_h.x * gradW.y + vAB_h.y * gradW.x; Real wxz = -vAB_h.x * gradW.z + vAB_h.z * gradW.x; Real wyz = -vAB_h.y * gradW.z + vAB_h.z * gradW.y; Real wyx = -wxy; // Real wzx = -wxz; Real wzy = -wyz; Real edia = 1.0 / 3.0 * (exx + eyy + ezz); Real twoGm = 2.0 * Gm; Real K_edia = paramsD.K_bulk*1.0*edia; dTauxx += twoGm * (exx - edia) + 2.0 * (tauxy * wxy + tauxz * wxz) + K_edia; dTauyy += twoGm * (eyy - edia) - 2.0 * (tauyx * wxy - tauyz * wyz) + K_edia; dTauzz += twoGm * (ezz - edia) - 2.0 * (tauzx * wxz + tauzy * wyz) + K_edia; dTauxy += twoGm * exy - (tauxx * wxy + tauxz * wzy) + (wxy * tauyy + wxz * tauzy); dTauxz += twoGm * exz - (tauxx * wxz + tauxy * wyz) + (wxy * tauyz + wxz * tauzz); dTauyz += twoGm * eyz - (tauyx * wxz + tauyy * wyz) + (wyx * tauxz + wyz * tauzz); } // Do integration for the kernel function if (d > paramsD.HSML*1.0e-9) { Real Wab = W3h_GPU(d, sortedPosRad[index].w); sum_w_i = sum_w_i + Wab * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE); // XSPH if (rhoPresMuB.w > -1.5 && rhoPresMuB.w < -0.5){ deltaV += paramsD.volume0 * (velMasB - velMasA) * Wab; } N_ = N_ + 1; } // Find particles that have contact with this particle if(N_s < 12 && d < 2.0*radii){ Real Pen = (radii - d) * invRadii; Real3 r_0 = bsvdT * invd * dist3 ; Real3 r_s = r_0 * Pen; if (d < 1.0*radii) { inner_sum += 3.0*r_s; N_s = N_s + 1; } else if (d < 1.1*radii) { inner_sum += 1.0*r_s; N_s = N_s + 1; } else { inner_sum += 0.1 * 1.0 * (-r_0); N_s = N_s + 1; } } } // Check particles who have not enough neighbor particles (only for granular now) if(sum_w_i < paramsD.C_Wi){ derivVelRho.w = -1.0; } else{ derivVelRho.w = 1.0; } // Calculate the shifting vector Real det_r_max = length(0.05*velMasA*paramsD.dT); Real det_r_A = length(inner_sum); if(det_r_A < det_r_max){ shift_r[index] = inner_sum; } else{ shift_r[index] = inner_sum * det_r_max/(det_r_A + 1e-9); } shift_r[index] += deltaV * paramsD.dT; shift_r[index] = shift_r[index] * (1.0 / paramsD.dT); // add gravity other body force to fluid markers if (rhoPresMuA.w > -1.5 && rhoPresMuA.w < -0.5){ Real3 totalFluidBodyForce3 = paramsD.bodyForce3 + paramsD.gravity; derivVelRho += mR4(totalFluidBodyForce3); } sortedDerivVelRho[index] = derivVelRho; sortedDerivTauXxYyZz[index] = mR3(dTauxx, dTauyy, dTauzz); sortedDerivTauXyXzYz[index] = mR3(dTauxy, dTauxz, dTauyz); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void CalcVel_XSPH_D(Real3* vel_XSPH_Sorted_D, // output: new velocity Real4* sortedPosRad_old, // input: sorted positions Real4* sortedPosRad, // input: sorted positions Real3* sortedVelMas, // input: sorted velocities Real4* sortedRhoPreMu, Real3* shift_r, uint* gridMarkerIndex, // input: sorted particle indices uint* cellStart, uint* cellEnd, const size_t numAllMarkers, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numAllMarkers) return; Real4 rhoPreMuA = sortedRhoPreMu[index]; Real3 velMasA = sortedVelMas[index]; Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML; Real3 posRadA = 
mR3(sortedPosRad_old[index]); Real3 deltaV = mR3(0); // get address in grid int3 gridPos = calcGridPos(posRadA); Real3 inner_sum = mR3(0.0); // Real mi_bar = 0.0, r0 = 0.0; Real3 dV = mR3(0.0f); // examine neighbouring cells for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); uint startIndex = cellStart[gridHash]; uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { if (j != index) { // check not colliding with self Real3 posRadB = mR3(sortedPosRad_old[j]); Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d > SuppRadii) continue; Real4 rhoPresMuB = sortedRhoPreMu[j]; if (rhoPresMuB.w != -1.0) continue; Real3 velMasB = sortedVelMas[j]; Real rho_bar = 0.5 * (rhoPreMuA.x + rhoPresMuB.x); deltaV += paramsD.markerMass * (velMasB - velMasA) * W3h_GPU(d, (sortedPosRad_old[index].w + sortedPosRad_old[j].w) * 0.5) / rho_bar; } } } } } vel_XSPH_Sorted_D[index] = deltaV + shift_r[index]*(1.0/paramsD.dT); // sortedPosRad[index] += mR4(shift_r[index], 0.0); // if (!(isfinite(vel_XSPH_Sorted_D[index].x) && isfinite(vel_XSPH_Sorted_D[index].y) && isfinite(vel_XSPH_Sorted_D[index].z))) { printf("Error! particle vXSPH is NAN: thrown from ChFsiForceExplicitSPH.cu, newVel_XSPH_D !\n"); *isErrorD = true; } } //-------------------------------------------------------------------------------------------------------------------------------- ChFsiForceExplicitSPH::ChFsiForceExplicitSPH(std::shared_ptr<ChBce> otherBceWorker, std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD, std::shared_ptr<ProximityDataD> otherMarkersProximityD, std::shared_ptr<FsiGeneralData> otherFsiGeneralData, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<NumberOfObjects> otherNumObjects) : ChFsiForce(otherBceWorker, otherSortedSphMarkersD, otherMarkersProximityD, otherFsiGeneralData, otherParamsH, otherNumObjects) { CopyParams_NumberOfObjects(paramsH, numObjectsH); density_initialization = 0; } //-------------------------------------------------------------------------------------------------------------------------------- ChFsiForceExplicitSPH::~ChFsiForceExplicitSPH() {} //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForceExplicitSPH::Finalize() { ChFsiForce::Finalize(); hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(NumberOfObjects)); hipMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams)); hipDeviceSynchronize(); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForceExplicitSPH::ForceSPH(std::shared_ptr<SphMarkerDataD> otherSphMarkersD, std::shared_ptr<FsiBodiesDataD> otherFsiBodiesD, std::shared_ptr<FsiMeshDataD> fsiMeshD) { sphMarkersD = otherSphMarkersD; fsiCollisionSystem->ArrangeData(sphMarkersD); bceWorker->ModifyBceVelocity(sphMarkersD, otherFsiBodiesD); CollideWrapper(); CalculateXSPH_velocity(); // AddGravityToFluid(); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForceExplicitSPH::CollideWrapper() { bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); hipMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, 
sizeof(bool), hipMemcpyHostToDevice); //------------------------------------------------------------------------ // thread per particle uint numThreads, numBlocks; computeGridSize((int)numObjectsH->numAllMarkers, 128, numBlocks, numThreads); /* Execute the kernel */ // thrust::device_vector<Real> _sumWij_rhoi(numObjectsH->numAllMarkers); thrust::device_vector<Real4> sortedDerivVelRho(numObjectsH->numAllMarkers); thrust::device_vector<Real3> sortedDerivTauXxYyZz(numObjectsH->numAllMarkers); thrust::device_vector<Real3> sortedDerivTauXyXzYz(numObjectsH->numAllMarkers); shift_r.resize(numObjectsH->numAllMarkers); // thrust::fill(_sumWij_rhoi.begin(), _sumWij_rhoi.end(), 0.); // thrust::fill(shift_r.begin(), shift_r.end(), mR3(0.0)); // thrust::fill(sortedDerivVelRho.begin(), sortedDerivVelRho.end(), mR4(0.0)); // thrust::fill(sortedDerivTauXxYyZz.begin(), sortedDerivTauXxYyZz.end(), mR3(0.0)); // thrust::fill(sortedDerivTauXyXzYz.begin(), sortedDerivTauXyXzYz.end(), mR3(0.0)); // thrust::device_vector<Real4> rhoPresMuD_old = sortedSphMarkersD->rhoPresMuD; if (density_initialization == 0){ thrust::device_vector<Real> _sumWij_rhoi(numObjectsH->numAllMarkers); thrust::device_vector<Real4> rhoPresMuD_old = sortedSphMarkersD->rhoPresMuD; printf("Re-initializing density after %d steps.\n", paramsH->densityReinit); hipLaunchKernelGGL(( calcRho_kernel), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR4CAST(rhoPresMuD_old), R1CAST(_sumWij_rhoi), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numObjectsH->numAllMarkers, density_initialization, isErrorD); ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "calcRho_kernel"); } if(paramsH->elastic_SPH){ // execute the kernel Navier_Stokes and Shear_Stress_Rate in one kernel *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); // execute the kernel hipLaunchKernelGGL(( NS_SSR), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sortedDerivVelRho),mR3CAST(sortedDerivTauXxYyZz), mR3CAST(sortedDerivTauXyXzYz), mR3CAST(shift_r), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(bceWorker->velMas_ModifiedBCE), mR4CAST(bceWorker->rhoPreMu_ModifiedBCE), mR3CAST(sortedSphMarkersD->tauXxYyZzD), mR3CAST(sortedSphMarkersD->tauXyXzYzD), U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numObjectsH->numAllMarkers, isErrorD); ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "Navier_Stokes and Shear_Stress_Rate"); } else{ // EOS<<<numBlocks, numThreads>>>(mR4CAST(sortedSphMarkersD->rhoPresMuD), // numObjectsH->numAllMarkers, isErrorD); // ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "EOS"); thrust::device_vector<Real3>::iterator iter = thrust::max_element(sortedSphMarkersD->velMasD.begin(), sortedSphMarkersD->velMasD.end(), compare_Real3_mag()); ////unsigned int position = iter - sortedSphMarkersD->velMasD.begin(); Real MaxVel = length(*iter); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); // execute the kernel hipLaunchKernelGGL(( Navier_Stokes), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sortedDerivVelRho), mR3CAST(shift_r), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(bceWorker->velMas_ModifiedBCE), mR4CAST(bceWorker->rhoPreMu_ModifiedBCE), 
mR3CAST(sortedSphMarkersD->tauXxYyZzD), mR3CAST(sortedSphMarkersD->tauXyXzYzD), // U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numObjectsH->numAllMarkers, MaxVel, isErrorD); ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "Navier_Stokes"); } CopySortedToOriginal_Invasive_R4(fsiGeneralData->derivVelRhoD_old, sortedDerivVelRho, markersProximityD->gridMarkerIndexD); if(paramsH->elastic_SPH){ CopySortedToOriginal_Invasive_R3(fsiGeneralData->derivTauXxYyZzD, sortedDerivTauXxYyZz, markersProximityD->gridMarkerIndexD); CopySortedToOriginal_Invasive_R3(fsiGeneralData->derivTauXyXzYzD, sortedDerivTauXyXzYz, markersProximityD->gridMarkerIndexD); } sortedDerivVelRho.clear(); sortedDerivTauXxYyZz.clear(); sortedDerivTauXyXzYz.clear(); hipFree(isErrorD); free(isErrorH); density_initialization++; if (density_initialization >= paramsH->densityReinit) density_initialization = 0; } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForceExplicitSPH::CalculateXSPH_velocity() { /* Calculate vel_XSPH */ if (vel_XSPH_Sorted_D.size() != numObjectsH->numAllMarkers) { printf("vel_XSPH_Sorted_D.size() %zd numObjectsH->numAllMarkers %zd \n", vel_XSPH_Sorted_D.size(), numObjectsH->numAllMarkers); throw std::runtime_error( "Error! size error vel_XSPH_Sorted_D Thrown from " "CalculateXSPH_velocity!\n"); } bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); hipMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); //------------------------------------------------------------------------ if(paramsH->elastic_SPH){ // The XSPH vector already included in the shifting vector CopySortedToOriginal_Invasive_R3(fsiGeneralData->vel_XSPH_D, shift_r, markersProximityD->gridMarkerIndexD); } else{ /* thread per particle */ uint numThreads, numBlocks; computeGridSize((uint)numObjectsH->numAllMarkers, 128, numBlocks, numThreads); thrust::device_vector<Real4> sortedPosRad_old = sortedSphMarkersD->posRadD; thrust::fill(vel_XSPH_Sorted_D.begin(), vel_XSPH_Sorted_D.end(), mR3(0.0)); /* Execute the kernel */ hipLaunchKernelGGL(( CalcVel_XSPH_D), dim3(numBlocks), dim3(numThreads), 0, 0, mR3CAST(vel_XSPH_Sorted_D), mR4CAST(sortedPosRad_old), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(shift_r), U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numObjectsH->numAllMarkers, isErrorD); ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "CalcVel_XSPH_D"); CopySortedToOriginal_NonInvasive_R3(fsiGeneralData->vel_XSPH_D, vel_XSPH_Sorted_D, markersProximityD->gridMarkerIndexD); // CopySortedToOriginal_NonInvasive_R4(sphMarkersD->posRadD, sortedSphMarkersD->posRadD, markersProximityD->gridMarkerIndexD); } if (density_initialization % paramsH->densityReinit == 0) CopySortedToOriginal_NonInvasive_R4(sphMarkersD->rhoPresMuD, sortedSphMarkersD->rhoPresMuD, markersProximityD->gridMarkerIndexD); hipFree(isErrorD); free(isErrorH); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForceExplicitSPH::AddGravityToFluid() { // add gravity to fluid markers /* Add outside forces. 
Don't add gravity to rigids, BCE, and boundaries, it is * added in ChSystem */ Real3 totalFluidBodyForce3 = paramsH->bodyForce3 + paramsH->gravity; thrust::device_vector<Real4> bodyForceD(numObjectsH->numAllMarkers); thrust::fill(bodyForceD.begin(), bodyForceD.end(), mR4(totalFluidBodyForce3)); thrust::transform( fsiGeneralData->derivVelRhoD_old.begin() + fsiGeneralData->referenceArray[0].x, fsiGeneralData->derivVelRhoD_old.begin() + fsiGeneralData->referenceArray[0].y, bodyForceD.begin(), fsiGeneralData->derivVelRhoD_old.begin() + fsiGeneralData->referenceArray[0].x, thrust::plus<Real4>()); bodyForceD.clear(); } } // namespace fsi } // namespace chrono //================================================================================================================================
1b94e2202819beeb49d8e6338863e161a46095a3.cu
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Arman Pazouki, Wei Hu // ============================================================================= #include <thrust/extrema.h> #include <thrust/sort.h> #include "chrono_fsi/physics/ChFsiForceExplicitSPH.cuh" //================================================================================================================================ namespace chrono { namespace fsi { __device__ __inline__ void calc_G_Matrix(Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* G_i, uint* cellStart, uint* cellEnd, const size_t numAllMarkers) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML; // get address in grid int3 gridPos = calcGridPos(posRadA); // This is the elements of inverse of G Real mGi[9] = {0.0}; // examine neighbouring cells for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell50 uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); if (d > SuppRadii || sortedRhoPreMu[j].w <= -2) continue; Real3 grad_i_wij = GradWh(rij, h_i); Real3 grw_vj = grad_i_wij * paramsD.volume0; mGi[0] -= rij.x * grw_vj.x; mGi[1] -= rij.x * grw_vj.y; mGi[2] -= rij.x * grw_vj.z; mGi[3] -= rij.y * grw_vj.x; mGi[4] -= rij.y * grw_vj.y; mGi[5] -= rij.y * grw_vj.z; mGi[6] -= rij.z * grw_vj.x; mGi[7] -= rij.z * grw_vj.y; mGi[8] -= rij.z * grw_vj.z; } } } Real Det = (mGi[0] * mGi[4] * mGi[8] - mGi[0] * mGi[5] * mGi[7] - mGi[1] * mGi[3] * mGi[8] + mGi[1] * mGi[5] * mGi[6] + mGi[2] * mGi[3] * mGi[7] - mGi[2] * mGi[4] * mGi[6]); if (abs(Det) > 0.01) { Real OneOverDet = 1.0/Det; G_i[0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) * OneOverDet; G_i[1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) * OneOverDet; G_i[2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) * OneOverDet; G_i[3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) * OneOverDet; G_i[4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) * OneOverDet; G_i[5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) * OneOverDet; G_i[6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) * OneOverDet; G_i[7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) * OneOverDet; G_i[8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) * OneOverDet; } else { for (int i = 0; i < 9; i++) { G_i[i] = 0.0; } G_i[0] = 1; G_i[4] = 1; G_i[8] = 1; } } __device__ __inline__ void calc_A_Matrix(Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* A_i, Real* G_i, uint* cellStart, uint* cellEnd, const size_t numAllMarkers) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real SuppRadii = 
RESOLUTION_LENGTH_MULT * paramsD.HSML; // get address in grid int3 gridPos = calcGridPos(posRadA); // examine neighbouring cells for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell50 uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); if (d > SuppRadii || sortedRhoPreMu[j].w <= -2) continue; Real h_j = sortedPosRad[j].w; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_ij = GradWh(rij, h_ij); Real V_j = paramsD.markerMass/paramsD.rho0; Real com_part = 0; com_part = (G_i[0] * grad_ij.x + G_i[1] * grad_ij.y + G_i[2] * grad_ij.z) * V_j; A_i[0] += rij.x * rij.x * com_part; // 111 A_i[1] += rij.x * rij.y * com_part; // 112 A_i[2] += rij.x * rij.z * com_part; // 113 A_i[3] += rij.y * rij.x * com_part; // 121 A_i[4] += rij.y * rij.y * com_part; // 122 A_i[5] += rij.y * rij.z * com_part; // 123 A_i[6] += rij.z * rij.x * com_part; // 131 A_i[7] += rij.z * rij.y * com_part; // 132 A_i[8] += rij.z * rij.z * com_part; // 133 com_part = (G_i[3] * grad_ij.x + G_i[4] * grad_ij.y + G_i[5] * grad_ij.z) * V_j; A_i[9] += rij.x * rij.x * com_part; // 211 A_i[10] += rij.x * rij.y * com_part; // 212 A_i[11] += rij.x * rij.z * com_part; // 213 A_i[12] += rij.y * rij.x * com_part; // 221 A_i[13] += rij.y * rij.y * com_part; // 222 A_i[14] += rij.y * rij.z * com_part; // 223 A_i[15] += rij.z * rij.x * com_part; // 231 A_i[16] += rij.z * rij.y * com_part; // 232 A_i[17] += rij.z * rij.z * com_part; // 233 com_part = (G_i[6] * grad_ij.x + G_i[7] * grad_ij.y + G_i[8] * grad_ij.z) * V_j; A_i[18] += rij.x * rij.x * com_part; // 311 A_i[19] += rij.x * rij.y * com_part; // 312 A_i[20] += rij.x * rij.z * com_part; // 313 A_i[21] += rij.y * rij.x * com_part; // 321 A_i[22] += rij.y * rij.y * com_part; // 322 A_i[23] += rij.y * rij.z * com_part; // 323 A_i[24] += rij.z * rij.x * com_part; // 331 A_i[25] += rij.z * rij.y * com_part; // 332 A_i[26] += rij.z * rij.z * com_part; // 333 } } } } __device__ __inline__ void calc_L_Matrix(Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* A_i, Real* L_i, Real* G_i, uint* cellStart, uint* cellEnd, const size_t numAllMarkers) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML; Real B[36] = {0.0}; Real L[6] = {0.0}; // get address in grid int3 gridPos = calcGridPos(posRadA); // examine neighbouring cells for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell50 uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); if (d > SuppRadii || sortedRhoPreMu[j].w <= -2) continue; Real3 eij = rij / d; Real h_j = sortedPosRad[j].w; // Real m_j = paramsD.markerMass; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_ij = GradWh(rij, h_ij); Real V_j = 
paramsD.markerMass/paramsD.rho0; Real com_part = 0; // mn=11 Real XX = (eij.x * grad_ij.x); Real XY = (eij.x * grad_ij.y + eij.y * grad_ij.x); Real XZ = (eij.x * grad_ij.z + eij.z * grad_ij.x); Real YY = (eij.y * grad_ij.y); Real YZ = (eij.y * grad_ij.z + eij.z * grad_ij.y); Real ZZ = (eij.z * grad_ij.z); com_part = (A_i[0] * eij.x + A_i[9] * eij.y + A_i[18] * eij.z + rij.x * eij.x) * V_j; B[6 * 0 + 0] += com_part * XX; // 11 B[6 * 0 + 1] += com_part * XY; // 12 B[6 * 0 + 2] += com_part * XZ; // 13 B[6 * 0 + 3] += com_part * YY; // 14 B[6 * 0 + 4] += com_part * YZ; // 15 B[6 * 0 + 5] += com_part * ZZ; // 15 // mn=12 com_part = (A_i[1] * eij.x + A_i[10] * eij.y + A_i[19] * eij.z + rij.x * eij.y) * V_j; B[6 * 1 + 0] += com_part * XX; // 21 B[6 * 1 + 1] += com_part * XY; // 22 B[6 * 1 + 2] += com_part * XZ; // 23 B[6 * 1 + 3] += com_part * YY; // 24 B[6 * 1 + 4] += com_part * YZ; // 25 B[6 * 1 + 5] += com_part * ZZ; // 25 // mn=13 com_part = (A_i[2] * eij.x + A_i[11] * eij.y + A_i[20] * eij.z + rij.x * eij.z) * V_j; B[6 * 2 + 0] += com_part * XX; // 31 B[6 * 2 + 1] += com_part * XY; // 32 B[6 * 2 + 2] += com_part * XZ; // 33 B[6 * 2 + 3] += com_part * YY; // 34 B[6 * 2 + 4] += com_part * YZ; // 35 B[6 * 2 + 5] += com_part * ZZ; // 36 // Note that we skip mn=21 since it is similar to mn=12 // mn=22 com_part = (A_i[4] * eij.x + A_i[13] * eij.y + A_i[22] * eij.z + rij.y * eij.y) * V_j; B[6 * 3 + 0] += com_part * XX; // 41 B[6 * 3 + 1] += com_part * XY; // 42 B[6 * 3 + 2] += com_part * XZ; // 43 B[6 * 3 + 3] += com_part * YY; // 44 B[6 * 3 + 4] += com_part * YZ; // 45 B[6 * 3 + 5] += com_part * ZZ; // 46 // mn=23 com_part = (A_i[5] * eij.x + A_i[14] * eij.y + A_i[23] * eij.z + rij.y * eij.z) * V_j; B[6 * 4 + 0] += com_part * XX; // 51 B[6 * 4 + 1] += com_part * XY; // 52 B[6 * 4 + 2] += com_part * XZ; // 53 B[6 * 4 + 3] += com_part * YY; // 54 B[6 * 4 + 4] += com_part * YZ; // 55 B[6 * 4 + 5] += com_part * ZZ; // 56 // mn=33 com_part = (A_i[8] * eij.x + A_i[17] * eij.y + A_i[26] * eij.z + rij.z * eij.z) * V_j; B[6 * 5 + 0] += com_part * XX; // 61 B[6 * 5 + 1] += com_part * XY; // 62 B[6 * 5 + 2] += com_part * XZ; // 63 B[6 * 5 + 3] += com_part * YY; // 64 B[6 * 5 + 4] += com_part * YZ; // 65 B[6 * 5 + 5] += com_part * ZZ; // 66 } } } inv6xdelta_mn(B, L); L_i[0] = L[0]; L_i[1] = L[1]; L_i[2] = L[2]; L_i[3] = L[1]; L_i[4] = L[3]; L_i[5] = L[4]; L_i[6] = L[2]; L_i[7] = L[4]; L_i[8] = L[5]; // Real Det = (L_i[0] * L_i[4] * L_i[8] - L_i[0] * L_i[5] * L_i[7] - L_i[1] * L_i[3] * L_i[8] + // L_i[1] * L_i[5] * L_i[6] + L_i[2] * L_i[3] * L_i[7] - L_i[2] * L_i[4] * L_i[6]); // if (abs(Det) < 0.01) { // for (int i = 0; i < 9; i++) { // L_i[0 * 9 + i] = 0.0; // L_i[0 * 9 + 0] = 1; // L_i[0 * 9 + 4] = 1; // L_i[0 * 9 + 8] = 1; // } // } // printf("L Det %f\n", Det); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Shear_Stress_Rate(Real4* sortedPosRad, Real4* sortedRhoPreMu, Real3* sortedVelMas, Real3* velMas_ModifiedBCE, Real4* rhoPreMu_ModifiedBCE, Real3* sortedTauXxYyZz, Real3* sortedTauXyXzYz, Real3* sortedDerivTauXxYyZz, Real3* sortedDerivTauXyXzYz, uint* gridMarkerIndex, uint* cellStart, uint* cellEnd, const size_t numAllMarkers) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numAllMarkers) { return; } if (sortedRhoPreMu[index].w > -0.5) { return; } Real3 posRadA = mR3(sortedPosRad[index]); Real3 velMasA = sortedVelMas[index]; Real hA = sortedPosRad[index].w; Real SuppRadii = 
RESOLUTION_LENGTH_MULT * paramsD.HSML; Real tauxx = sortedTauXxYyZz[index].x; Real tauyy = sortedTauXxYyZz[index].y; Real tauzz = sortedTauXxYyZz[index].z; Real tauxy = sortedTauXyXzYz[index].x; Real tauxz = sortedTauXyXzYz[index].y; Real tauyz = sortedTauXyXzYz[index].z; Real tauzx = tauxz; Real tauzy = tauyz; Real tauyx = tauxy; Real dTauxx = 0.0; Real dTauyy = 0.0; Real dTauzz = 0.0; Real dTauxy = 0.0; Real dTauxz = 0.0; Real dTauyz = 0.0; Real G_i[9] = {0.0}; calc_G_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,G_i,cellStart,cellEnd,numAllMarkers); // get address in grid int3 gridPos = calcGridPos(posRadA); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); uint startIndex = cellStart[gridHash]; uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { if (j != index) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d > SuppRadii) continue; Real3 velMasB = sortedVelMas[j]; Real4 rhoPresMuB = sortedRhoPreMu[j]; if (rhoPresMuB.w > -1.0) { int bceIndexB = gridMarkerIndex[j] - (numObjectsD.numFluidMarkers); if (!(bceIndexB >= 0 && bceIndexB < numObjectsD.numBoundaryMarkers + numObjectsD.numRigid_SphMarkers)) { printf("Error! bceIndex out of bound, collideCell !\n"); } rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB]; // to check velMasB = velMas_ModifiedBCE[bceIndexB]; // to check velMasB = 2.0*velMasB - velMasA; // noslip BC } Real rhoB = rhoPresMuB.x; Real hB = sortedPosRad[j].w; Real mB = paramsD.markerMass; Real3 gradW = GradWh(dist3, (hA + hB) * 0.5); Real3 gradW_new; gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z; gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z; gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z; gradW = gradW_new; // start to calculate the rate Real Gm = paramsD.G_shear; // shear modulus of the material Real half_mB_over_rhoB = 0.5 * (mB / rhoB); Real3 vAB = velMasA - velMasB; Real3 vAB_h = (velMasA - velMasB) * half_mB_over_rhoB; // entries of strain rate tensor Real exx = -2.0 * vAB_h.x * gradW.x; Real eyy = -2.0 * vAB_h.y * gradW.y; Real ezz = -2.0 * vAB_h.z * gradW.z; Real exy = -vAB_h.x * gradW.y - vAB_h.y * gradW.x; Real exz = -vAB_h.x * gradW.z - vAB_h.z * gradW.x; Real eyz = -vAB_h.y * gradW.z - vAB_h.z * gradW.y; // entries of rotation rate (spin) tensor // Real wxx = 0.0; // Real wyy = 0.0; // Real wzz = 0.0; Real wxy = -vAB_h.x * gradW.y + vAB_h.y * gradW.x; Real wxz = -vAB_h.x * gradW.z + vAB_h.z * gradW.x; Real wyz = -vAB_h.y * gradW.z + vAB_h.z * gradW.y; Real wyx = -wxy; // Real wzx = -wxz; Real wzy = -wyz; Real edia = 1.0 / 3.0 * (exx + eyy + ezz); Real twoGm = 2.0 * Gm; Real K_edia = paramsD.K_bulk*1.0*edia; dTauxx += twoGm * (exx - edia) + 2.0 * (tauxy * wxy + tauxz * wxz) + K_edia; dTauyy += twoGm * (eyy - edia) - 2.0 * (tauyx * wxy - tauyz * wyz) + K_edia; dTauzz += twoGm * (ezz - edia) - 2.0 * (tauzx * wxz + tauzy * wyz) + K_edia; dTauxy += twoGm * exy - (tauxx * wxy + tauxz * wzy) + (wxy * tauyy + wxz * tauzy); dTauxz += twoGm * exz - (tauxx * wxz + tauxy * wyz) + (wxy * tauyz + wxz * tauzz); dTauyz += twoGm * eyz - (tauyx * wxz + tauyy * wyz) + (wyx * tauxz + wyz * tauzz); } } } } } sortedDerivTauXxYyZz[index] = mR3(dTauxx, dTauyy, dTauzz); sortedDerivTauXyXzYz[index] = mR3(dTauxy, dTauxz, dTauyz); } 
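// Note on the kernel above: the raw gradient GradWh(dist3, 0.5*(hA+hB)) is
// corrected by the 3x3 renormalization matrix G_i (gradW_new = G_i * gradW).
// G_i is built in calc_G_Matrix as the inverse of -sum_j V_j * (r_ij (x) grad W_ij)
// and falls back to the identity when |det| <= 0.01, so the strain/spin rates are
// evaluated with the usual first-order consistent gradient where the neighborhood
// is well resolved.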
//-------------------------------------------------------------------------------------------------------------------------------- __global__ void calcRho_kernel(Real4* sortedPosRad, Real4* sortedRhoPreMu, Real4* sortedRhoPreMu_old, Real* _sumWij_rhoi, uint* cellStart, uint* cellEnd, const size_t numAllMarkers, int density_reinit, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } sortedRhoPreMu_old[i_idx].y = Eos(sortedRhoPreMu_old[i_idx].x, sortedRhoPreMu_old[i_idx].w); Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML; Real sum_mW = 0; Real sum_mW_rho = 0.0000001; Real sum_W = 0.0; // get address in grid int3 gridPos = calcGridPos(posRadA); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d > SuppRadii) continue; if (sortedRhoPreMu_old[j].w == -1) { // Real h_j = sortedPosRad[j].w; Real m_j = paramsD.markerMass; // cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real W3 = W3h_GPU(d, 0.5 * (h_j + h_i)); sum_mW += m_j * W3; sum_W += W3; sum_mW_rho += m_j * W3 / sortedRhoPreMu_old[j].x; } } } } } } // sumWij_inv[i_idx] = paramsD.markerMass / sum_mW; // sortedRhoPreMu[i_idx].x = sum_mW; if ((density_reinit == 0) && (sortedRhoPreMu[i_idx].w == -1)) sortedRhoPreMu[i_idx].x = sum_mW / sum_mW_rho; if ((sortedRhoPreMu[i_idx].x > 3 * paramsD.rho0 || sortedRhoPreMu[i_idx].x < 0.01 * paramsD.rho0) && sortedRhoPreMu[i_idx].w == -1) printf("(calcRho_kernel)density marker %d, sum_mW=%f, sum_W=%f, h_i=%f\n", i_idx, sum_mW, sum_W, h_i); } //-------------------------------------------------------------------------------------------------------------------------------- // modify pressure for body force __device__ __inline__ void modifyPressure(Real4& rhoPresMuB, const Real3& dist3Alpha) { // body force in x direction rhoPresMuB.y = (dist3Alpha.x > 0.5 * paramsD.boxDims.x) ? (rhoPresMuB.y - paramsD.deltaPress.x) : rhoPresMuB.y; rhoPresMuB.y = (dist3Alpha.x < -0.5 * paramsD.boxDims.x) ? (rhoPresMuB.y + paramsD.deltaPress.x) : rhoPresMuB.y; // body force in x direction rhoPresMuB.y = (dist3Alpha.y > 0.5 * paramsD.boxDims.y) ? (rhoPresMuB.y - paramsD.deltaPress.y) : rhoPresMuB.y; rhoPresMuB.y = (dist3Alpha.y < -0.5 * paramsD.boxDims.y) ? (rhoPresMuB.y + paramsD.deltaPress.y) : rhoPresMuB.y; // body force in x direction rhoPresMuB.y = (dist3Alpha.z > 0.5 * paramsD.boxDims.z) ? (rhoPresMuB.y - paramsD.deltaPress.z) : rhoPresMuB.y; rhoPresMuB.y = (dist3Alpha.z < -0.5 * paramsD.boxDims.z) ? 
(rhoPresMuB.y + paramsD.deltaPress.z) : rhoPresMuB.y; } //-------------------------------------------------------------------------------------------------------------------------------- __device__ inline Real3 CubicSolve(Real aa, Real bb, Real cc, Real dd) { Real disc, q, r, dum1, dum2, term1, r13; bb /= aa; cc /= aa; dd /= aa; if (aa == 0) { return mR3(0, 0, 0); } if (abs(bb) < 1e-9) { return mR3(0, 0, 0); } if (abs(cc) < 1e-9) { return mR3(0, 0, 0); } if (abs(dd) < 1e-9) { return mR3(0, 0, 0); } q = (3.0 * cc - (bb * bb)) / 9.0; r = -(27.0 * dd) + bb * (9.0 * cc - 2.0 * (bb * bb)); r /= 54.0; disc = q * q * q + r * r; term1 = (bb / 3.0); /* dataForm.x1Im.value = 0; //The first root is always real. if (disc > 0) { // one root real, two are complex s = r + Math.sqrt(disc); s = ((s < 0) ? -Math.pow(-s, (1.0/3.0)) : Math.pow(s, (1.0/3.0))); t = r - Math.sqrt(disc); t = ((t < 0) ? -Math.pow(-t, (1.0/3.0)) : Math.pow(t, (1.0/3.0))); dataForm.x1Re.value = -term1 + s + t; term1 += (s + t)/2.0; dataForm.x3Re.value = dataForm.x2Re.value = -term1; term1 = Math.sqrt(3.0)*(-t + s)/2; dataForm.x2Im.value = term1; dataForm.x3Im.value = -term1; return; } // End if (disc > 0) // The remaining options are all real dataForm.x3Im.value = dataForm.x2Im.value = 0; if (disc == 0){ // All roots real, at least two are equal. r13 = ((r < 0) ? -Math.pow(-r,(1.0/3.0)) : Math.pow(r,(1.0/3.0))); dataForm.x1Re.value = -term1 + 2.0*r13; dataForm.x3Re.value = dataForm.x2Re.value = -(r13 + term1); return; } // End if (disc == 0) */ Real xRex, xRey, xRez; // have complex root if (disc > 0) { xRex = 0.0; xRey = 0.0; xRez = 0.0; return mR3(xRex, xRey, xRez); } // All roots real, at least two are equal. if (disc == 0) { if (r < 0) { r13 = pow(-r, (1.0 / 3.0)); } else { r13 = pow(r, (1.0 / 3.0)); } xRex = -term1 + 2.0 * r13; xRey = -(r13 + term1); xRez = xRey; return mR3(xRex, xRey, xRez); } // All roots are real and unequal (to get here, q < 0) q = -q; dum1 = q * q * q; dum2 = r / (sqrt(dum1 + 1.0e-9)); if ((dum2 >= 0) && (dum2 <= 1)) { dum1 = acos(dum2); } else { xRex = 0.0; xRey = 0.0; xRez = 0.0; return mR3(xRex, xRey, xRez); } r13 = 2.0 * sqrt(q); xRex = -term1 + r13 * cos(dum1 / 3.0); xRey = -term1 + r13 * cos((dum1 + 2.0 * 3.1415926) / 3.0); xRez = -term1 + r13 * cos((dum1 + 4.0 * 3.1415926) / 3.0); return mR3(xRex, xRey, xRez); } __device__ inline Real3 CubicEigen(Real4 c1, Real4 c2, Real4 c3) { Real a = c1.x; Real b = c1.y; Real c = c1.z; Real d = c1.w; Real l = c2.x; Real m = c2.y; Real n = c2.z; Real k = c2.w; Real p = c3.x; Real q = c3.y; Real r = c3.z; Real s = c3.w; Real D = (a * m * r + b * p * n + c * l * q) - (a * n * q + b * l * r + c * m * p) + 1.0e-9; Real x = ((b * r * k + c * m * s + d * n * q) - (b * n * s + c * q * k + d * m * r)) / D; Real y = ((a * n * s + c * p * k + d * l * r) - (a * r * k + c * l * s + d * n * p)) / D; Real z = ((a * q * k + b * l * s + d * m * p) - (a * m * s + b * p * k + d * l * q)) / D; b = b + 1.0e-9; x = 1.0e0; z = (-l + a * m / b) / (n - c * m / b); y = (-a - c * z) / b; Real R = sqrt(x * x + y * y + z * z); x = x / R; y = y / R; z = z / R; // if(abs(D) < 1){ // return mR3(0,0,0); // } // if(abs(m) < 0.1){ // x=0; // y=1; // z=0; // return mR3(x,y,z); // } // else{ // y=0; // if(abs(c) > 0.1){ // x=1; // z=-a/c; // return mR3(x,y,z); // } // if(abs(a) > 0.1){ // z=1; // x=-c/a; // return mR3(x,y,z); // } // } return mR3(x, y, z); } //-------------------------------------------------------------------------------------------------------------------------------- /** * @brief 
DifVelocityRho * @details See SDKCollisionSystem.cuh */ __device__ inline Real4 DifVelocityRho(float G_i[9], Real3 dist3, Real d, Real4 posRadA, Real4 posRadB, Real3 velMasA, Real3 vel_XSPH_A, Real3 velMasB, Real3 vel_XSPH_B, Real4 rhoPresMuA, Real4 rhoPresMuB, Real multViscosity) { Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5); // Real3 gradW_new; // gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z; // gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z; // gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z; // gradW = gradW_new; // Real vAB_Dot_rAB = dot(velMasA - velMasB, dist3); // // // //*** Artificial viscosity type 1.1 // Real alpha = .001; // Real c_ab = 10 * paramsD.v_Max; // Ma = .1;//sqrt(7.0f * 10000 / // // ((rhoPresMuA.x + rhoPresMuB.x) / 2.0f)); // // Real h = paramsD.HSML; // Real rho = .5f * (rhoPresMuA.x + rhoPresMuB.x); // Real nu = alpha * paramsD.HSML * c_ab / rho; // // //*** Artificial viscosity type 1.2 // // Real nu = 22.8f * paramsD.mu0 / 2.0f / (rhoPresMuA.x * rhoPresMuB.x); // Real3 derivV = -paramsD.markerMass * // (rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) + rhoPresMuB.y / (rhoPresMuB.x * rhoPresMuB.x) - // nu * vAB_Dot_rAB / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML)) * // gradW; // return mR4(derivV, rhoPresMuA.x * paramsD.markerMass / rhoPresMuB.x * dot(vel_XSPH_A - vel_XSPH_B, gradW)); //*** Artificial viscosity type 2 if (rhoPresMuA.w > -1 && rhoPresMuB.w > -1) return mR4(0.0); Real rAB_Dot_GradWh = dot(dist3, gradW); Real rAB_Dot_GradWh_OverDist = rAB_Dot_GradWh / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML); Real3 derivV = - paramsD.markerMass *(rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) + rhoPresMuB.y / (rhoPresMuB.x * rhoPresMuB.x)) * gradW + paramsD.markerMass * (8.0f * multViscosity) * paramsD.mu0 * rAB_Dot_GradWh_OverDist * (velMasA - velMasB) / square(rhoPresMuA.x + rhoPresMuB.x); // Real derivRho = rhoPresMuA.x * paramsD.markerMass / rhoPresMuB.x * dot(vel_XSPH_A - vel_XSPH_B, gradW); // Real zeta = 0;//.05;//.1; // Real derivRho = rhoPresMuA.x * paramsD.markerMass * invrhoPresMuBx * //(dot(vel_XSPH_A - vel_XSPH_B, gradW) // + zeta * paramsD.HSML * (10 * paramsD.v_Max) * 2 * (rhoPresMuB.x /// rhoPresMuA.x - 1) * // rAB_Dot_GradWh_OverDist // ); //-------------------------------- // Ferrari Modification Real derivRho = paramsD.markerMass * dot(vel_XSPH_A - vel_XSPH_B, gradW); // Real cA = FerrariCi(rhoPresMuA.x); // Real cB = FerrariCi(rhoPresMuB.x); // derivRho += rAB_Dot_GradWh / (d + paramsD.epsMinMarkersDis * paramsD.HSML) * max(cA, cB) / rhoPresMuB.x * // (rhoPresMuB.x - rhoPresMuA.x); //*** Artificial viscosity Real vAB_Dot_rAB = dot(velMasA - velMasB, dist3); // if (vAB_Dot_rAB < 0.0) { // if ((rhoPresMuA.w < - 0.5) && (rhoPresMuB.w < - 0.5)){ // only for fluid particles Real alpha = 0.0;//paramsD.Ar_vis_alpha; Real c_ab = paramsD.Cs; Real rho = 0.5f * (rhoPresMuA.x * rhoPresMuB.x); Real nu = -alpha * paramsD.HSML * c_ab / rho; Real derivM1 = -paramsD.markerMass * (nu * vAB_Dot_rAB / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML)); derivV.x += derivM1 * gradW.x; derivV.y += derivM1 * gradW.y; derivV.z += derivM1 * gradW.z; // } // } // -------------------------------- return mR4(derivV, derivRho); // //*** Artificial viscosity type 1.3 // Real rAB_Dot_GradWh = dot(dist3, gradW); // Real3 derivV = -paramsD.markerMass * // (rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) + rhoPresMuB.y / (rhoPresMuB.x * // rhoPresMuB.x)) * gradW + 
// paramsD.markerMass / (rhoPresMuA.x * rhoPresMuB.x) * 2.0f * paramsD.mu0 * rAB_Dot_GradWh / // (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML) * (velMasA - velMasB); // return mR4(derivV, rhoPresMuA.x * paramsD.markerMass / rhoPresMuB.x * dot(vel_XSPH_A - vel_XSPH_B, gradW)); } /// Only for modelling elastic and granular problems __device__ inline Real4 DifVelocityRho_ElasticSPH(Real3 gradW, Real3 dist3, Real d, Real invd, Real4 posRadA, Real4 posRadB, Real3 velMasA_in, Real3 vel_XSPH_A_in, Real3 velMasB_in, Real3 vel_XSPH_B_in, Real4 rhoPresMuA, Real4 rhoPresMuB, Real multViscosity, Real3 tauXxYyZz_A_in, Real3 tauXyXzYz_A_in, Real3 tauXxYyZz_B_in, Real3 tauXyXzYz_B_in) { // if (rhoPresMuA.w > -1 ) // return mR4(0.0); // if (rhoPresMuB.w > -1 ) // return mR4(0.0); Real3 velMasA = velMasA_in; Real3 velMasB = velMasB_in; Real3 vel_XSPH_A = vel_XSPH_A_in; Real3 vel_XSPH_B = vel_XSPH_B_in; Real3 tauXxYyZz_A = tauXxYyZz_A_in; Real3 tauXxYyZz_B = tauXxYyZz_B_in; Real3 tauXyXzYz_A = tauXyXzYz_A_in; Real3 tauXyXzYz_B = tauXyXzYz_B_in; if (rhoPresMuA.w > -0.5 && rhoPresMuB.w > -0.5) return mR4(0.0); if (rhoPresMuA.w < -0.5 && rhoPresMuB.w > -0.5){ tauXxYyZz_B = tauXxYyZz_A; tauXyXzYz_B = tauXyXzYz_A; vel_XSPH_B = 2.0*vel_XSPH_B - vel_XSPH_A; // noslip BC // velMasB = 2.0*velMasB - velMasA; // noslip BC } if (rhoPresMuA.w > -0.5 && rhoPresMuB.w < -0.5){ tauXxYyZz_A = tauXxYyZz_B; tauXyXzYz_A = tauXyXzYz_B; } Real txxA = tauXxYyZz_A.x; Real tyyA = tauXxYyZz_A.y; Real tzzA = tauXxYyZz_A.z; Real txyA = tauXyXzYz_A.x; Real txzA = tauXyXzYz_A.y; Real tyzA = tauXyXzYz_A.z; Real txxB = tauXxYyZz_B.x; Real tyyB = tauXxYyZz_B.y; Real tzzB = tauXxYyZz_B.z; Real txyB = tauXyXzYz_B.x; Real txzB = tauXyXzYz_B.y; Real tyzB = tauXyXzYz_B.z; // Real PA = rhoPresMuA.y; // Real PB = rhoPresMuB.y; // Real rhoA = rhoPresMuA.x; // Real rhoB = rhoPresMuB.x; // Real rhoA2 = rhoA * rhoA; // Real rhoB2 = rhoB * rhoB; Real Mass = paramsD.markerMass; Real MassOverRhoA2 = Mass * paramsD.invrho0 * paramsD.invrho0;//Mass/rhoA2; Real MassOverRhoB2 = MassOverRhoA2;//Mass/rhoB2; Real3 MA_gradW = gradW * MassOverRhoA2; Real3 MB_gradW = gradW * MassOverRhoB2; Real derivVx = //-Mass * (PA / (rhoA * rhoA) + PB / (rhoB * rhoB)) * gradW.x + (txxA * MA_gradW.x + txyA * MA_gradW.y + txzA * MA_gradW.z) + (txxB * MB_gradW.x + txyB * MB_gradW.y + txzB * MB_gradW.z) ; Real derivVy = //-Mass * (PA / (rhoA * rhoA) + PB / (rhoB * rhoB)) * gradW.y + (txyA * MA_gradW.x + tyyA * MA_gradW.y + tyzA * MA_gradW.z) + (txyB * MB_gradW.x + tyyB * MB_gradW.y + tyzB * MB_gradW.z) ; Real derivVz = //-Mass * (PA / (rhoA * rhoA) + PB / (rhoB * rhoB)) * gradW.z + (txzA * MA_gradW.x + tyzA * MA_gradW.y + tzzA * MA_gradW.z) + (txzB * MB_gradW.x + tyzB * MB_gradW.y + tzzB * MB_gradW.z) ; // TODO: visco-plastic model // Real vel = length(velMasA); // if(vel > 0.3){ // Real rAB_Dot_GradWh = dot(dist3, gradW); // Real rAB_Dot_GradWh_OverDist = rAB_Dot_GradWh / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML); // Real3 derivV = - paramsD.markerMass *(rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) + rhoPresMuB.y / (rhoPresMuB.x * rhoPresMuB.x)) * gradW // + paramsD.markerMass * (8.0f * multViscosity) * paramsD.mu_fric_s // * pow(rhoPresMuA.x + rhoPresMuB.x, Real(-2)) * rAB_Dot_GradWh_OverDist * (velMasA - velMasB); // derivVx = derivV.x; // derivVy = derivV.y; // derivVz = derivV.z; // } //*** Artificial viscosity Real vAB_Dot_rAB = dot(velMasA - velMasB, dist3); // if (vAB_Dot_rAB < 0.0) { // if ((rhoPresMuA.w < - 0.5) && (rhoPresMuB.w 
< - 0.5)){ // only for fluid particles Real alpha = paramsD.Ar_vis_alpha; Real c_ab = paramsD.Cs; // Real rho = 0.5f * (rhoA + rhoB); Real nu = -alpha * paramsD.HSML * c_ab * paramsD.invrho0; Real derivM1 = -Mass * (nu * vAB_Dot_rAB * (invd * invd));//+ paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML derivVx += derivM1 * gradW.x; derivVy += derivM1 * gradW.y; derivVz += derivM1 * gradW.z; // } // } // damping force /*if (1 == 0) { Real xi0 = paramsD.Vis_Dam; Real E0 = paramsD.E_young; Real h0 = paramsD.HSML; Real Cd = xi0 * sqrt(E0 / (rhoA * h0 * h0)); derivVx -= Cd * velMasA.x; derivVy -= Cd * velMasA.y; derivVz -= Cd * velMasA.z; }*/ // Real derivRho = Mass * dot(vel_XSPH_A - vel_XSPH_B, gradW); return mR4(derivVx, derivVy, derivVz, 0.0); } //-------------------------------------------------------------------------------------------------------------------------------- __device__ inline Real3 GradientOperator( float G_i[9], Real3 dist3, Real4 posRadA, Real4 posRadB, Real fA, Real fB, Real4 rhoPresMuA, Real4 rhoPresMuB) { Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5); Real3 gradW_new; gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z; gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z; gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z; Real Vol = paramsD.markerMass/rhoPresMuB.x; Real fji = fB - fA; Real Gra_ij_x = fji*gradW_new.x * Vol; Real Gra_ij_y = fji*gradW_new.y * Vol; Real Gra_ij_z = fji*gradW_new.z * Vol; return mR3(Gra_ij_x, Gra_ij_y, Gra_ij_z); } //-------------------------------------------------------------------------------------------------------------------------------- __device__ inline Real4 LaplacianOperator( float G_i[9], float L_i[9], Real3 dist3, Real4 posRadA, Real4 posRadB, Real fA, Real fB, Real4 rhoPresMuA, Real4 rhoPresMuB) { Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5); Real d = length(dist3); Real3 eij = dist3/d; Real Vol = paramsD.markerMass/rhoPresMuB.x; Real fij = fA - fB; Real ex_Gwx = eij.x*gradW.x; Real ex_Gwy = eij.x*gradW.y; Real ex_Gwz = eij.x*gradW.z; Real ey_Gwx = eij.y*gradW.x; Real ey_Gwy = eij.y*gradW.y; Real ey_Gwz = eij.y*gradW.z; Real ez_Gwx = eij.z*gradW.x; Real ez_Gwy = eij.z*gradW.y; Real ez_Gwz = eij.z*gradW.z; Real Part1 = L_i[0]*ex_Gwx + L_i[1]*ex_Gwy + L_i[2]*ex_Gwz + L_i[3]*ey_Gwx + L_i[4]*ey_Gwy + L_i[5]*ey_Gwz + L_i[6]*ez_Gwx + L_i[7]*ez_Gwy + L_i[8]*ez_Gwz; Real Part2 = fij/d * Vol; Real3 Part3 = mR3(-eij.x, -eij.y, -eij.z) * Vol; return mR4(2.0*Part1*Part2, Part3.x*(2.0*Part1), Part3.y*(2.0*Part1), Part3.z*(2.0*Part1)); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void EOS(Real4* sortedRhoPreMu, uint numAllMarkers, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numAllMarkers) return; sortedRhoPreMu[index].y = Eos(sortedRhoPreMu[index].x, sortedRhoPreMu[index].w); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Navier_Stokes(Real4* sortedDerivVelRho, Real3* shift_r, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real3* velMas_ModifiedBCE, Real4* rhoPreMu_ModifiedBCE, Real3* sortedTauXxYyZz, Real3* sortedTauXyXzYz, uint* gridMarkerIndex, uint* cellStart, uint* cellEnd, const size_t numAllMarkers, Real MaxVel, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= 
numAllMarkers) return; if (sortedRhoPreMu[index].w > -0.5 && sortedRhoPreMu[index].w < 0.5){ sortedDerivVelRho[index] = mR4(0.0); return; } Real3 posRadA = mR3(sortedPosRad[index]); Real3 velMasA = sortedVelMas[index]; Real4 rhoPresMuA = sortedRhoPreMu[index]; Real4 derivVelRho = mR4(0.0); Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML; Real G_i[9] = {0.0}; Real A_i[27] = {0.0}; Real L_i[9] = {0.0}; calc_G_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,G_i,cellStart,cellEnd,numAllMarkers); if(!paramsD.elastic_SPH){ calc_A_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,A_i,G_i,cellStart,cellEnd,numAllMarkers); calc_L_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,A_i,L_i,G_i,cellStart,cellEnd,numAllMarkers); } float Gi[9] = {1.0,0.0,0.0, 0.0,1.0,0.0, 0.0,0.0,1.0}; float Li[9] = {1.0,0.0,0.0, 0.0,1.0,0.0, 0.0,0.0,1.0}; Gi[0] = G_i[0]; Gi[1] = G_i[1]; Gi[2] = G_i[2]; Gi[3] = G_i[3]; Gi[4] = G_i[4]; Gi[5] = G_i[5]; Gi[6] = G_i[6]; Gi[7] = G_i[7]; Gi[8] = G_i[8]; Li[0] = L_i[0]; Li[1] = L_i[1]; Li[2] = L_i[2]; Li[3] = L_i[3]; Li[4] = L_i[4]; Li[5] = L_i[5]; Li[6] = L_i[6]; Li[7] = L_i[7]; Li[8] = L_i[8]; // Real3 posGra = mR3(0.0); // Real4 posLap = mR4(0.0); Real3 preGra = mR3(0.0); Real3 velxGra = mR3(0.0); Real3 velyGra = mR3(0.0); Real3 velzGra = mR3(0.0); Real4 velxLap = mR4(0.0); Real4 velyLap = mR4(0.0); Real4 velzLap = mR4(0.0); Real radii = paramsD.MULT_INITSPACE * paramsD.HSML*1.241;//1.129;//1.241 Real invRadii = 1.0/radii; Real3 v_ab = (velMasA + velMasA)*0.5; Real v_ab_m = length(v_ab); Real bsvdT = paramsD.beta_shifting * v_ab_m * paramsD.dT ; // Real3 numeratorXxYyZz = mR3(0.0); // Real3 numeratorXyXzYz = mR3(0.0); // Real denominator = 1e-9; // get address in grid int3 gridPos = calcGridPos(posRadA); Real3 inner_sum = mR3(0.0); // Real mi_bar = 0.0, r0 = 0.0; Real sum_w_i = W3h_GPU(0.0, sortedPosRad[index].w) * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE); int N_ = 1; int N_s = 0; for (int x = -1; x <= 1; x++) { for (int y = -1; y <= 1; y++) { for (int z = -1; z <= 1; z++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); uint startIndex = cellStart[gridHash]; uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { if (j != index) { Real3 posRadB = mR3(sortedPosRad[j]); // Real3 dist3Alpha = posRadA - posRadB; Real3 dist3 = Distance(posRadA, posRadB); // change from B-A to A-B Real d = length(dist3); if (d > SuppRadii) continue; Real4 rhoPresMuB = sortedRhoPreMu[j]; if (rhoPresMuA.w > -.1 && rhoPresMuB.w > -.1) { // no rigid-rigid force continue; } Real invd = 1.0 / d; // modifyPressure(rhoPresMuB, dist3Alpha); // if (!(isfinite(rhoPresMuB.x) && isfinite(rhoPresMuB.y) && isfinite(rhoPresMuB.z))) { // printf("Error! particle rhoPresMuB is NAN: thrown from modifyPressure !\n"); // } Real3 velMasB = sortedVelMas[j]; if (rhoPresMuB.w > -1.0) { int bceIndexB = gridMarkerIndex[j] - (numObjectsD.numFluidMarkers); // if (!(bceIndexB >= 0 && // bceIndexB < numObjectsD.numBoundaryMarkers + numObjectsD.numRigid_SphMarkers)) { // printf("Error! bceIndex out of bound, collideCell !\n"); // } rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB]; velMasB = velMas_ModifiedBCE[bceIndexB]; } Real multViscosit = 1; // if (!(isfinite(rhoPresMuB.x) && isfinite(rhoPresMuB.y) && isfinite(rhoPresMuB.z))) { // printf("Error! particle rhoPresMuB is NAN: thrown from collideCell ! 
type=%f\n", // rhoPresMuB.w); // } // change from "-=" to "+=" if(paramsD.elastic_SPH){ Real3 gradW = GradWh(dist3, paramsD.HSML); Real3 gradW_new; gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z; gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z; gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z; gradW = gradW_new; derivVelRho += DifVelocityRho_ElasticSPH(gradW, dist3, d, invd, sortedPosRad[index], sortedPosRad[j], velMasA, velMasA, velMasB, velMasB, rhoPresMuA, rhoPresMuB, multViscosit, sortedTauXxYyZz[index], sortedTauXyXzYz[index], sortedTauXxYyZz[j], sortedTauXyXzYz[j]); } else{ derivVelRho += DifVelocityRho(Gi, dist3, d, sortedPosRad[index], sortedPosRad[j], velMasA, velMasA, velMasB, velMasB, rhoPresMuA, rhoPresMuB, multViscosit); preGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j], -rhoPresMuA.y, rhoPresMuB.y, rhoPresMuA, rhoPresMuB); velxGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j], velMasA.x, velMasB.x, rhoPresMuA, rhoPresMuB); velyGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j], velMasA.y, velMasB.y, rhoPresMuA, rhoPresMuB); velzGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j], velMasA.z, velMasB.z, rhoPresMuA, rhoPresMuB); velxLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j], velMasA.x, velMasB.x, rhoPresMuA, rhoPresMuB); velyLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j], velMasA.y, velMasB.y, rhoPresMuA, rhoPresMuB); velzLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j], velMasA.z, velMasB.z, rhoPresMuA, rhoPresMuB); } if (d > paramsD.HSML*1.0e-9 && sum_w_i < paramsD.C_Wi) { // Real m_j = cube(sortedPosRad[j].w * paramsD.MULT_INITSPACE) * paramsD.rho0; // mi_bar += m_j; // r0 += d; // inner_sum += m_j * dist3 / (d * d * d); sum_w_i = sum_w_i + W3h_GPU(d, sortedPosRad[index].w) * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE); N_ = N_ + 1; } // find particles that have contact with this particle if(N_s < 12 && d < 2.0*radii){ Real Pen = (radii - d) * invRadii; Real3 r_0 = bsvdT * invd * dist3 ; Real3 r_s = r_0 * Pen; if (d < 1.0*radii) { inner_sum += 3.0*r_s; N_s = N_s + 1; } else if (d < 1.1*radii) { inner_sum += 1.0*r_s; N_s = N_s + 1; } else { inner_sum += 0.1 * 1.0 * (-r_0); N_s = N_s + 1; } } // posGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j], // square(posRadA.x), square(posRadB.x), rhoPresMuA, rhoPresMuB); // posLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j], // square(posRadA.x), square(posRadB.x), rhoPresMuA, rhoPresMuB); } } } } } if(paramsD.elastic_SPH){ if(sum_w_i < paramsD.C_Wi){ derivVelRho.w = -1.0; } else{ derivVelRho.w = 1.0; } } if(!paramsD.elastic_SPH){ Real nu = paramsD.mu0/paramsD.rho0; Real dvxdt = -preGra.x/rhoPresMuA.x + (velxLap.x + velxGra.x*velxLap.y + velxGra.y*velxLap.z + velxGra.z*velxLap.w) * nu; Real dvydt = -preGra.y/rhoPresMuA.x + (velyLap.x + velyGra.x*velyLap.y + velyGra.y*velyLap.z + velyGra.z*velyLap.w) * nu; Real dvzdt = -preGra.z/rhoPresMuA.x + (velzLap.x + velzGra.x*velzLap.y + velzGra.y*velzLap.z + velzGra.z*velzLap.w) * nu; Real drhodt = -paramsD.rho0*(velxGra.x + velyGra.y + velzGra.z); Real Det_G = (Gi[0] * Gi[4] * Gi[8] - Gi[0] * Gi[5] * Gi[7] - Gi[1] * Gi[3] * Gi[8] + Gi[1] * Gi[5] * Gi[6] + Gi[2] * Gi[3] * Gi[7] - Gi[2] * Gi[4] * Gi[6]); Real Det_L = (Li[0] * Li[4] * Li[8] - Li[0] * Li[5] * Li[7] - Li[1] * Li[3] * Li[8] + Li[1] * Li[5] * Li[6] + 
Li[2] * Li[3] * Li[7] - Li[2] * Li[4] * Li[6]); if(rhoPresMuA.w == -1){ if( Det_G > 0.9 && Det_G < 1.1 && Det_L > 0.9 && Det_L < 1.1 && sum_w_i > 0.9){ // printf("Det_G, Det_L %f %f %f %f %f %d\n", Det_G, Det_L, posRadA.x, posRadA.y, posRadA.z, N_); derivVelRho = mR4(dvxdt, dvydt, dvzdt, drhodt); } // Real dvdt =length(mR3(derivVelRho)); // Real coeff = 1000.0/dvdt; // if(dvdt > 1000.0){ // derivVelRho = mR4(dvxdt * coeff, dvydt * coeff, dvzdt * coeff, 0.0); // derivVelRho = mR4(0.0); // } } } if (!(isfinite(derivVelRho.x) && isfinite(derivVelRho.y) && isfinite(derivVelRho.z))) { printf("Error! particle derivVel is NAN: thrown from ChFsiForceExplicitSPH.cu, collideD !\n"); *isErrorD = true; } if (!(isfinite(derivVelRho.w))) { printf("Error! particle derivRho is NAN: thrown from ChFsiForceExplicitSPH.cu, collideD !\n"); *isErrorD = true; } // add gravity and other body force to fluid markers if (rhoPresMuA.w > -1.5 && rhoPresMuA.w < -0.5){ Real3 totalFluidBodyForce3 = paramsD.bodyForce3 + paramsD.gravity; derivVelRho += mR4(totalFluidBodyForce3); } sortedDerivVelRho[index] = derivVelRho; // r0 /= N_; // mi_bar /= N_; // if (sum_w_i > 0.95 && sortedRhoPreMu[index].w < -0.5 ) // shift_r[index] = paramsD.beta_shifting * r0 * r0 * MaxVel * paramsD.dT * inner_sum / (mi_bar+1e-9); // else // shift_r[index] = mR3(0.0); Real det_r_max = length(0.05*velMasA*paramsD.dT); Real det_r_A = length(inner_sum); if(det_r_A < det_r_max){ shift_r[index] = inner_sum; } else{ shift_r[index] = inner_sum * det_r_max/(det_r_A + 1e-9); } // shift_r[index] = mR3(0.0); // shift_r[index].y = 0.0; // if (sum_w_i < 0.95 && sortedRhoPreMu[index].w < -0.5) // printf("Finished in %f %f %f %f %f\n", sum_w_i, sortedPosRad[index].x, sortedPosRad[index].y, sortedPosRad[index].z, sortedRhoPreMu[index].w); } __global__ void NS_SSR( Real4* sortedDerivVelRho, Real3* sortedDerivTauXxYyZz, Real3* sortedDerivTauXyXzYz, Real3* shift_r, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, Real3* velMas_ModifiedBCE, Real4* rhoPreMu_ModifiedBCE, Real3* sortedTauXxYyZz, Real3* sortedTauXyXzYz, uint* gridMarkerIndex, uint* cellStart, uint* cellEnd, const size_t numAllMarkers, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numAllMarkers) return; if (sortedRhoPreMu[index].w > -0.5 && sortedRhoPreMu[index].w < 0.5){ sortedDerivVelRho[index] = mR4(0.0); sortedDerivTauXxYyZz[index] = mR3(0.0); sortedDerivTauXyXzYz[index] = mR3(0.0); return; } Real3 posRadA = mR3(sortedPosRad[index]); Real3 velMasA = sortedVelMas[index]; Real4 rhoPresMuA = sortedRhoPreMu[index]; Real hA = sortedPosRad[index].w; Real4 derivVelRho = mR4(0.0); Real3 deltaV = mR3(0); Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML; uint j_list[150]; uint j_num = 0; // Get address in grid int3 gridPos = calcGridPos(posRadA); for (int x = -1; x <= 1; x++) { for (int y = -1; y <= 1; y++) { for (int z = -1; z <= 1; z++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); uint startIndex = cellStart[gridHash]; uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { if (j != index) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d < SuppRadii){ j_list[j_num] = j; j_num++; } } } } } } Real tauxx = sortedTauXxYyZz[index].x; Real tauyy = sortedTauXxYyZz[index].y; Real tauzz = sortedTauXxYyZz[index].z; Real tauxy = sortedTauXyXzYz[index].x; Real tauxz = sortedTauXyXzYz[index].y; Real tauyz = 
sortedTauXyXzYz[index].z; Real tauzx = tauxz; Real tauzy = tauyz; Real tauyx = tauxy; Real dTauxx = 0.0; Real dTauyy = 0.0; Real dTauzz = 0.0; Real dTauxy = 0.0; Real dTauxz = 0.0; Real dTauyz = 0.0; Real G_i[9] = {1.0,0.0,0.0, 0.0,1.0,0.0, 0.0,0.0,1.0}; { Real mGi[9] = {0.0}; for(uint n = 0; n < j_num; n++){ uint j = j_list[n]; Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); if (d > SuppRadii || sortedRhoPreMu[j].w <= -2) continue; Real3 grad_i_wij = GradWh(rij, hA); Real3 grw_vj = grad_i_wij * paramsD.volume0; mGi[0] -= rij.x * grw_vj.x; mGi[1] -= rij.x * grw_vj.y; mGi[2] -= rij.x * grw_vj.z; mGi[3] -= rij.y * grw_vj.x; mGi[4] -= rij.y * grw_vj.y; mGi[5] -= rij.y * grw_vj.z; mGi[6] -= rij.z * grw_vj.x; mGi[7] -= rij.z * grw_vj.y; mGi[8] -= rij.z * grw_vj.z; } Real Det = (mGi[0] * mGi[4] * mGi[8] - mGi[0] * mGi[5] * mGi[7] - mGi[1] * mGi[3] * mGi[8] + mGi[1] * mGi[5] * mGi[6] + mGi[2] * mGi[3] * mGi[7] - mGi[2] * mGi[4] * mGi[6]); if (abs(Det) > 0.01) { Real OneOverDet = 1.0 / Det; G_i[0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) * OneOverDet; G_i[1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) * OneOverDet; G_i[2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) * OneOverDet; G_i[3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) * OneOverDet; G_i[4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) * OneOverDet; G_i[5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) * OneOverDet; G_i[6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) * OneOverDet; G_i[7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) * OneOverDet; G_i[8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) * OneOverDet; } } Real radii = paramsD.MULT_INITSPACE * paramsD.HSML*1.241;//1.129;//1.241 Real invRadii = 1.0/radii; Real3 v_ab = (velMasA + velMasA)*0.5; Real v_ab_m = length(v_ab); Real bsvdT = paramsD.beta_shifting * v_ab_m * paramsD.dT ; Real3 inner_sum = mR3(0.0); Real sum_w_i = W3h_GPU(0.0, sortedPosRad[index].w) * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE); int N_ = 1; int N_s = 0; for(uint n = 0; n < j_num; n++){ uint j = j_list[n]; Real3 posRadB = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d > SuppRadii) continue; Real4 rhoPresMuB = sortedRhoPreMu[j]; if (rhoPresMuA.w > -.1 && rhoPresMuB.w > -.1) { // no rigid-rigid force continue; } Real invd = 1.0 / d; Real3 velMasB = sortedVelMas[j]; if (rhoPresMuB.w > -1.0) { int bceIndexB = gridMarkerIndex[j] - (numObjectsD.numFluidMarkers); rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB]; velMasB = velMas_ModifiedBCE[bceIndexB]; } Real multViscosit = 1; // For granular material dynamics // Real rhoB = rhoPresMuB.x; Real hB = sortedPosRad[j].w; // Real mB = paramsD.markerMass; Real3 gradW = GradWh(dist3, (hA + hB) * 0.5); Real3 gradW_new; gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z; gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z; gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z; gradW = gradW_new; derivVelRho += DifVelocityRho_ElasticSPH(gradW, dist3, d, invd, sortedPosRad[index], sortedPosRad[j], velMasA, velMasA, velMasB, velMasB, rhoPresMuA, rhoPresMuB, multViscosit, sortedTauXxYyZz[index], sortedTauXyXzYz[index], sortedTauXxYyZz[j], sortedTauXyXzYz[j]); if(sortedRhoPreMu[index].w < -0.5){ // start to calculate the stress rate Real Gm = paramsD.G_shear; // shear modulus of the material Real half_mB_over_rhoB = 0.5 * paramsD.volume0; //(mB / rhoB); Real3 velMasB_new = velMasB; if (rhoPresMuB.w > -1.0) velMasB_new = 2.0*velMasB - velMasA; // noslip BC Real3 vAB = velMasA - velMasB_new; Real3 vAB_h 
= vAB * half_mB_over_rhoB; // entries of strain rate tensor Real exx = -2.0 * vAB_h.x * gradW.x; Real eyy = -2.0 * vAB_h.y * gradW.y; Real ezz = -2.0 * vAB_h.z * gradW.z; Real exy = -vAB_h.x * gradW.y - vAB_h.y * gradW.x; Real exz = -vAB_h.x * gradW.z - vAB_h.z * gradW.x; Real eyz = -vAB_h.y * gradW.z - vAB_h.z * gradW.y; // entries of rotation rate (spin) tensor // Real wxx = 0.0; // Real wyy = 0.0; // Real wzz = 0.0; Real wxy = -vAB_h.x * gradW.y + vAB_h.y * gradW.x; Real wxz = -vAB_h.x * gradW.z + vAB_h.z * gradW.x; Real wyz = -vAB_h.y * gradW.z + vAB_h.z * gradW.y; Real wyx = -wxy; // Real wzx = -wxz; Real wzy = -wyz; Real edia = 1.0 / 3.0 * (exx + eyy + ezz); Real twoGm = 2.0 * Gm; Real K_edia = paramsD.K_bulk*1.0*edia; dTauxx += twoGm * (exx - edia) + 2.0 * (tauxy * wxy + tauxz * wxz) + K_edia; dTauyy += twoGm * (eyy - edia) - 2.0 * (tauyx * wxy - tauyz * wyz) + K_edia; dTauzz += twoGm * (ezz - edia) - 2.0 * (tauzx * wxz + tauzy * wyz) + K_edia; dTauxy += twoGm * exy - (tauxx * wxy + tauxz * wzy) + (wxy * tauyy + wxz * tauzy); dTauxz += twoGm * exz - (tauxx * wxz + tauxy * wyz) + (wxy * tauyz + wxz * tauzz); dTauyz += twoGm * eyz - (tauyx * wxz + tauyy * wyz) + (wyx * tauxz + wyz * tauzz); } // Do integration for the kernel function if (d > paramsD.HSML*1.0e-9) { Real Wab = W3h_GPU(d, sortedPosRad[index].w); sum_w_i = sum_w_i + Wab * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE); // XSPH if (rhoPresMuB.w > -1.5 && rhoPresMuB.w < -0.5){ deltaV += paramsD.volume0 * (velMasB - velMasA) * Wab; } N_ = N_ + 1; } // Find particles that have contact with this particle if(N_s < 12 && d < 2.0*radii){ Real Pen = (radii - d) * invRadii; Real3 r_0 = bsvdT * invd * dist3 ; Real3 r_s = r_0 * Pen; if (d < 1.0*radii) { inner_sum += 3.0*r_s; N_s = N_s + 1; } else if (d < 1.1*radii) { inner_sum += 1.0*r_s; N_s = N_s + 1; } else { inner_sum += 0.1 * 1.0 * (-r_0); N_s = N_s + 1; } } } // Check particles who have not enough neighbor particles (only for granular now) if(sum_w_i < paramsD.C_Wi){ derivVelRho.w = -1.0; } else{ derivVelRho.w = 1.0; } // Calculate the shifting vector Real det_r_max = length(0.05*velMasA*paramsD.dT); Real det_r_A = length(inner_sum); if(det_r_A < det_r_max){ shift_r[index] = inner_sum; } else{ shift_r[index] = inner_sum * det_r_max/(det_r_A + 1e-9); } shift_r[index] += deltaV * paramsD.dT; shift_r[index] = shift_r[index] * (1.0 / paramsD.dT); // add gravity other body force to fluid markers if (rhoPresMuA.w > -1.5 && rhoPresMuA.w < -0.5){ Real3 totalFluidBodyForce3 = paramsD.bodyForce3 + paramsD.gravity; derivVelRho += mR4(totalFluidBodyForce3); } sortedDerivVelRho[index] = derivVelRho; sortedDerivTauXxYyZz[index] = mR3(dTauxx, dTauyy, dTauzz); sortedDerivTauXyXzYz[index] = mR3(dTauxy, dTauxz, dTauyz); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void CalcVel_XSPH_D(Real3* vel_XSPH_Sorted_D, // output: new velocity Real4* sortedPosRad_old, // input: sorted positions Real4* sortedPosRad, // input: sorted positions Real3* sortedVelMas, // input: sorted velocities Real4* sortedRhoPreMu, Real3* shift_r, uint* gridMarkerIndex, // input: sorted particle indices uint* cellStart, uint* cellEnd, const size_t numAllMarkers, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numAllMarkers) return; Real4 rhoPreMuA = sortedRhoPreMu[index]; Real3 velMasA = sortedVelMas[index]; Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML; Real3 posRadA = 
mR3(sortedPosRad_old[index]); Real3 deltaV = mR3(0); // get address in grid int3 gridPos = calcGridPos(posRadA); Real3 inner_sum = mR3(0.0); // Real mi_bar = 0.0, r0 = 0.0; Real3 dV = mR3(0.0f); // examine neighbouring cells for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); uint startIndex = cellStart[gridHash]; uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { if (j != index) { // check not colliding with self Real3 posRadB = mR3(sortedPosRad_old[j]); Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d > SuppRadii) continue; Real4 rhoPresMuB = sortedRhoPreMu[j]; if (rhoPresMuB.w != -1.0) continue; Real3 velMasB = sortedVelMas[j]; Real rho_bar = 0.5 * (rhoPreMuA.x + rhoPresMuB.x); deltaV += paramsD.markerMass * (velMasB - velMasA) * W3h_GPU(d, (sortedPosRad_old[index].w + sortedPosRad_old[j].w) * 0.5) / rho_bar; } } } } } vel_XSPH_Sorted_D[index] = deltaV + shift_r[index]*(1.0/paramsD.dT); // sortedPosRad[index] += mR4(shift_r[index], 0.0); // if (!(isfinite(vel_XSPH_Sorted_D[index].x) && isfinite(vel_XSPH_Sorted_D[index].y) && isfinite(vel_XSPH_Sorted_D[index].z))) { printf("Error! particle vXSPH is NAN: thrown from ChFsiForceExplicitSPH.cu, newVel_XSPH_D !\n"); *isErrorD = true; } } //-------------------------------------------------------------------------------------------------------------------------------- ChFsiForceExplicitSPH::ChFsiForceExplicitSPH(std::shared_ptr<ChBce> otherBceWorker, std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD, std::shared_ptr<ProximityDataD> otherMarkersProximityD, std::shared_ptr<FsiGeneralData> otherFsiGeneralData, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<NumberOfObjects> otherNumObjects) : ChFsiForce(otherBceWorker, otherSortedSphMarkersD, otherMarkersProximityD, otherFsiGeneralData, otherParamsH, otherNumObjects) { CopyParams_NumberOfObjects(paramsH, numObjectsH); density_initialization = 0; } //-------------------------------------------------------------------------------------------------------------------------------- ChFsiForceExplicitSPH::~ChFsiForceExplicitSPH() {} //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForceExplicitSPH::Finalize() { ChFsiForce::Finalize(); cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(NumberOfObjects)); cudaMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams)); cudaDeviceSynchronize(); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForceExplicitSPH::ForceSPH(std::shared_ptr<SphMarkerDataD> otherSphMarkersD, std::shared_ptr<FsiBodiesDataD> otherFsiBodiesD, std::shared_ptr<FsiMeshDataD> fsiMeshD) { sphMarkersD = otherSphMarkersD; fsiCollisionSystem->ArrangeData(sphMarkersD); bceWorker->ModifyBceVelocity(sphMarkersD, otherFsiBodiesD); CollideWrapper(); CalculateXSPH_velocity(); // AddGravityToFluid(); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForceExplicitSPH::CollideWrapper() { bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); cudaMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, 
sizeof(bool), cudaMemcpyHostToDevice); //------------------------------------------------------------------------ // thread per particle uint numThreads, numBlocks; computeGridSize((int)numObjectsH->numAllMarkers, 128, numBlocks, numThreads); /* Execute the kernel */ // thrust::device_vector<Real> _sumWij_rhoi(numObjectsH->numAllMarkers); thrust::device_vector<Real4> sortedDerivVelRho(numObjectsH->numAllMarkers); thrust::device_vector<Real3> sortedDerivTauXxYyZz(numObjectsH->numAllMarkers); thrust::device_vector<Real3> sortedDerivTauXyXzYz(numObjectsH->numAllMarkers); shift_r.resize(numObjectsH->numAllMarkers); // thrust::fill(_sumWij_rhoi.begin(), _sumWij_rhoi.end(), 0.); // thrust::fill(shift_r.begin(), shift_r.end(), mR3(0.0)); // thrust::fill(sortedDerivVelRho.begin(), sortedDerivVelRho.end(), mR4(0.0)); // thrust::fill(sortedDerivTauXxYyZz.begin(), sortedDerivTauXxYyZz.end(), mR3(0.0)); // thrust::fill(sortedDerivTauXyXzYz.begin(), sortedDerivTauXyXzYz.end(), mR3(0.0)); // thrust::device_vector<Real4> rhoPresMuD_old = sortedSphMarkersD->rhoPresMuD; if (density_initialization == 0){ thrust::device_vector<Real> _sumWij_rhoi(numObjectsH->numAllMarkers); thrust::device_vector<Real4> rhoPresMuD_old = sortedSphMarkersD->rhoPresMuD; printf("Re-initializing density after %d steps.\n", paramsH->densityReinit); calcRho_kernel<<<numBlocks, numThreads>>>( mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR4CAST(rhoPresMuD_old), R1CAST(_sumWij_rhoi), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numObjectsH->numAllMarkers, density_initialization, isErrorD); ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "calcRho_kernel"); } if(paramsH->elastic_SPH){ // execute the kernel Navier_Stokes and Shear_Stress_Rate in one kernel *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); // execute the kernel NS_SSR<<<numBlocks, numThreads>>>( mR4CAST(sortedDerivVelRho),mR3CAST(sortedDerivTauXxYyZz), mR3CAST(sortedDerivTauXyXzYz), mR3CAST(shift_r), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(bceWorker->velMas_ModifiedBCE), mR4CAST(bceWorker->rhoPreMu_ModifiedBCE), mR3CAST(sortedSphMarkersD->tauXxYyZzD), mR3CAST(sortedSphMarkersD->tauXyXzYzD), U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numObjectsH->numAllMarkers, isErrorD); ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "Navier_Stokes and Shear_Stress_Rate"); } else{ // EOS<<<numBlocks, numThreads>>>(mR4CAST(sortedSphMarkersD->rhoPresMuD), // numObjectsH->numAllMarkers, isErrorD); // ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "EOS"); thrust::device_vector<Real3>::iterator iter = thrust::max_element(sortedSphMarkersD->velMasD.begin(), sortedSphMarkersD->velMasD.end(), compare_Real3_mag()); ////unsigned int position = iter - sortedSphMarkersD->velMasD.begin(); Real MaxVel = length(*iter); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); // execute the kernel Navier_Stokes<<<numBlocks, numThreads>>>( mR4CAST(sortedDerivVelRho), mR3CAST(shift_r), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(bceWorker->velMas_ModifiedBCE), mR4CAST(bceWorker->rhoPreMu_ModifiedBCE), mR3CAST(sortedSphMarkersD->tauXxYyZzD), mR3CAST(sortedSphMarkersD->tauXyXzYzD), // U1CAST(markersProximityD->gridMarkerIndexD), 
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numObjectsH->numAllMarkers, MaxVel, isErrorD); ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "Navier_Stokes"); } CopySortedToOriginal_Invasive_R4(fsiGeneralData->derivVelRhoD_old, sortedDerivVelRho, markersProximityD->gridMarkerIndexD); if(paramsH->elastic_SPH){ CopySortedToOriginal_Invasive_R3(fsiGeneralData->derivTauXxYyZzD, sortedDerivTauXxYyZz, markersProximityD->gridMarkerIndexD); CopySortedToOriginal_Invasive_R3(fsiGeneralData->derivTauXyXzYzD, sortedDerivTauXyXzYz, markersProximityD->gridMarkerIndexD); } sortedDerivVelRho.clear(); sortedDerivTauXxYyZz.clear(); sortedDerivTauXyXzYz.clear(); cudaFree(isErrorD); free(isErrorH); density_initialization++; if (density_initialization >= paramsH->densityReinit) density_initialization = 0; } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForceExplicitSPH::CalculateXSPH_velocity() { /* Calculate vel_XSPH */ if (vel_XSPH_Sorted_D.size() != numObjectsH->numAllMarkers) { printf("vel_XSPH_Sorted_D.size() %zd numObjectsH->numAllMarkers %zd \n", vel_XSPH_Sorted_D.size(), numObjectsH->numAllMarkers); throw std::runtime_error( "Error! size error vel_XSPH_Sorted_D Thrown from " "CalculateXSPH_velocity!\n"); } bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); cudaMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); //------------------------------------------------------------------------ if(paramsH->elastic_SPH){ // The XSPH vector already included in the shifting vector CopySortedToOriginal_Invasive_R3(fsiGeneralData->vel_XSPH_D, shift_r, markersProximityD->gridMarkerIndexD); } else{ /* thread per particle */ uint numThreads, numBlocks; computeGridSize((uint)numObjectsH->numAllMarkers, 128, numBlocks, numThreads); thrust::device_vector<Real4> sortedPosRad_old = sortedSphMarkersD->posRadD; thrust::fill(vel_XSPH_Sorted_D.begin(), vel_XSPH_Sorted_D.end(), mR3(0.0)); /* Execute the kernel */ CalcVel_XSPH_D<<<numBlocks, numThreads>>>( mR3CAST(vel_XSPH_Sorted_D), mR4CAST(sortedPosRad_old), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(shift_r), U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numObjectsH->numAllMarkers, isErrorD); ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "CalcVel_XSPH_D"); CopySortedToOriginal_NonInvasive_R3(fsiGeneralData->vel_XSPH_D, vel_XSPH_Sorted_D, markersProximityD->gridMarkerIndexD); // CopySortedToOriginal_NonInvasive_R4(sphMarkersD->posRadD, sortedSphMarkersD->posRadD, markersProximityD->gridMarkerIndexD); } if (density_initialization % paramsH->densityReinit == 0) CopySortedToOriginal_NonInvasive_R4(sphMarkersD->rhoPresMuD, sortedSphMarkersD->rhoPresMuD, markersProximityD->gridMarkerIndexD); cudaFree(isErrorD); free(isErrorH); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForceExplicitSPH::AddGravityToFluid() { // add gravity to fluid markers /* Add outside forces. 
Don't add gravity to rigids, BCE, and boundaries, it is
     * added in ChSystem */
    Real3 totalFluidBodyForce3 = paramsH->bodyForce3 + paramsH->gravity;
    thrust::device_vector<Real4> bodyForceD(numObjectsH->numAllMarkers);
    thrust::fill(bodyForceD.begin(), bodyForceD.end(), mR4(totalFluidBodyForce3));
    thrust::transform(
        fsiGeneralData->derivVelRhoD_old.begin() + fsiGeneralData->referenceArray[0].x,
        fsiGeneralData->derivVelRhoD_old.begin() + fsiGeneralData->referenceArray[0].y,
        bodyForceD.begin(),
        fsiGeneralData->derivVelRhoD_old.begin() + fsiGeneralData->referenceArray[0].x,
        thrust::plus<Real4>());
    bodyForceD.clear();
}

}  // namespace fsi
}  // namespace chrono
//================================================================================================================================
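// NOTE (illustrative addition, not from the original Chrono source): for orientation, ForceSPH() above runs one
// explicit force evaluation in this order:
//     fsiCollisionSystem->ArrangeData()  ->  bceWorker->ModifyBceVelocity()
//     ->  CollideWrapper()  (periodic calcRho_kernel re-initialization, then NS_SSR on the elastic/granular
//         path or Navier_Stokes otherwise)  ->  CalculateXSPH_velocity().
// CalcVel_XSPH_D stores the standard XSPH correction,
//     vel_XSPH_i = sum_j m_j * (v_j - v_i) * W(|r_ij|, h) / (0.5 * (rho_i + rho_j)) + shift_r_i / dt,
// while on the elastic branch the XSPH term is already folded into the shifting vector shift_r.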
40e7ee4806f2e1ba9d9bc3068127504ac7cfa342.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This sample implements a separable convolution * of a 2D image with an arbitrary filter. */ #include <stdio.h> #include <stdlib.h> #include "gputimer.h" #include <time.h> unsigned int filter_radius; GpuTimer timer; float overal_time = 0; clock_t start, end; double overal_CPU_time; #define FILTER_LENGTH (2 * filter_radius + 1) #define ABS(val) ((val)<0.0 ? (-(val)) : (val)) #define accuracy 0.00005 #define cudaCheckError() { \ hipError_t e=hipGetLastError(); \ if(e!=hipSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } //////////////////////////////////////////////////////////////////////////////// // Reference row convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionRowCPU(double *h_Dst, double *h_Src, double *h_Filter,int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { double sum = 0; for (k = -filterR; k <= filterR; k++) { int d = x + k; if (d >= 0 && d < imageW) { sum += h_Src[y * imageW + d] * h_Filter[filterR - k]; } h_Dst[y * imageW + x] = sum; } } } } //////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU(double *h_Dst, double *h_Src, double *h_Filter,int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { double sum = 0; for (k = -filterR; k <= filterR; k++) { int d = y + k; if (d >= 0 && d < imageH) { sum += h_Src[d * imageW + x] * h_Filter[filterR - k]; } h_Dst[y * imageW + x] = sum; } } } } //////////////////////////////////////////////////////////////////////////////// // Device code //////////////////////////////////////////////////////////////////////////////// __global__ void convolutionRowDevice(double *d_Dst, double *d_Src, double *d_Filter,int imageW, int imageH, int filterR) { int k; int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; double sum = 0; for (k = -filterR; k <= filterR; k++) { int d = row + k; if (d >= 0 && d < imageW) { //sum += h_Src[y * imageW + d] * h_Filter[filterR - k]; sum += d_Src[col * imageW + d] * d_Filter[filterR - k]; } //h_Dst[y * imageW + x] = sum; d_Dst[col * imageW + row] = sum; } } __global__ void convolutionColumnDevice(double *d_Dst, double *d_Src, double *d_Filter,int imageW, int imageH, int filterR) { int k; int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; double sum = 0; for (k = -filterR; k <= filterR; k++) { int d = col + k; if (d >= 0 && d < imageH) { //sum += h_Src[d * imageW + x] * h_Filter[filterR - k]; sum += d_Src[d * imageW + row] * d_Filter[filterR -k]; } //h_Dst[y * imageW + x] = sum; d_Dst[col * imageW + row] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { double *h_Filter, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; double *d_Filter, *d_Input, *d_Buffer, *d_OutputD; int imageW; int imageH; unsigned int N; unsigned int i; // Ta imageW, imageH ta dinei o xrhsths kai thewroume oti einai isa, // dhladh imageW = imageH = N, opou to N to dinei o 
xrhsths. // Gia aplothta thewroume tetragwnikes eikones.

    if (argc != 3) {
        printf("Mismatch in argument input \n");
        printf("1st argument: Filter Radius \n 2nd argument: Image Size \n");
        return 0;
    }

    filter_radius = atoi(argv[1]);
    N = atoi(argv[2]);
    imageH = N;
    imageW = N;

    if (N < FILTER_LENGTH || (N & (N - 1)) != 0) {
        printf("Wrong image size \n");
        printf("It should be at least %d and a power of 2 \n", FILTER_LENGTH);
        return 0;
    }

    printf("Image Width x Height = %i x %i\n\n", imageW, imageH);

    printf("Allocating host arrays...\n");
    // It would be a good idea to also check the result of the mallocs...
    // Host mallocs
    h_Filter = (double *)malloc(FILTER_LENGTH * sizeof(double));
    h_Input = (double *)malloc(imageW * imageH * sizeof(double));
    h_Buffer = (double *)malloc(imageW * imageH * sizeof(double));
    h_OutputCPU = (double *)malloc(imageW * imageH * sizeof(double));
    h_OutputGPU = (double *)malloc(imageW * imageH * sizeof(double));

    if (h_Filter == NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU == NULL || h_OutputGPU == NULL) {
        fprintf(stderr, "Failed to allocate Host matrices!\n");
        exit(EXIT_FAILURE);
    }

    printf("Allocating Device arrays...\n");
    // Device mallocs
    d_Filter = NULL;
    hipMalloc((void **)&d_Filter, FILTER_LENGTH * sizeof(double));
    cudaCheckError();
    d_Input = NULL;
    hipMalloc((void **)&d_Input, imageW * imageH * sizeof(double));
    cudaCheckError();
    d_Buffer = NULL;
    hipMalloc((void **)&d_Buffer, imageW * imageH * sizeof(double));
    cudaCheckError();
    d_OutputD = NULL;
    hipMalloc((void **)&d_OutputD, imageW * imageH * sizeof(double));
    cudaCheckError();

    // 'h_Filter' is the filter with which the convolution is performed; it is initialized randomly.
    // 'h_Input' is the image on which the convolution is applied; it is also initialized randomly.
    printf("Initializing Host arrays...\n");
    srand(200);

    for (i = 0; i < FILTER_LENGTH; i++) {
        h_Filter[i] = (double)(rand() % 16);
    }

    for (i = 0; i < imageW * imageH; i++) {
        h_Input[i] = (double)rand() / ((double)RAND_MAX / 255) + (double)rand() / (double)RAND_MAX;
    }

    printf("Initializing Device arrays...\n");
    // Transfer Data to Device
    timer.Start();
    hipMemcpy(d_Filter, h_Filter, FILTER_LENGTH * sizeof(double), hipMemcpyHostToDevice);
    timer.Stop();
    overal_time = overal_time + timer.Elapsed();
    cudaCheckError();

    timer.Start();
    hipMemcpy(d_Input, h_Input, imageW * imageH * sizeof(double), hipMemcpyHostToDevice);
    timer.Stop();
    overal_time = overal_time + timer.Elapsed();
    cudaCheckError();

    // The part below is executed on the CPU; the GPU results must be compared against it.
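    // NOTE (illustrative addition, not from the original sample): the two 1-D passes below are sufficient
    // because the 2-D filter is assumed separable, W(i, j) = w(i) * w(j). The convolution then factors as
    //     (I * W)(x, y) = sum_j w(j) * [ sum_i w(i) * I(x - i, y - j) ],
    // so the row pass writes the inner sums to h_Buffer and the column pass applies the same 1-D filter down
    // the columns, costing O(N^2 * R) per pass instead of O(N^2 * R^2) for a direct 2-D convolution.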
printf("CPU computation...\n"); start = clock(); convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // convolution kata grammes convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // convolution kata sthles end = clock(); // Kanete h sugrish anamesa se GPU kai CPU kai an estw kai kapoio apotelesma xeperna thn akriveia // pou exoume orisei, tote exoume sfalma kai mporoume endexomenws na termatisoume to programma mas printf("GPU computation...\n"); // Kernel paramiters prep int threadsPerBlock; if (N >= 32){ threadsPerBlock = 32; }else{ threadsPerBlock = N; } dim3 threads(threadsPerBlock, threadsPerBlock); int blocksPerGrid; if ( N>=32){ blocksPerGrid = N/threads.x; }else{ blocksPerGrid = 1; } dim3 grid(blocksPerGrid,blocksPerGrid); // convolution by rows device printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid*blocksPerGrid, threadsPerBlock*threadsPerBlock); timer.Start(); hipLaunchKernelGGL(( convolutionRowDevice), dim3(grid), dim3(threads), 0, 0, d_Buffer, d_Input, d_Filter, imageW, imageH, filter_radius); timer.Stop(); overal_time = overal_time + timer.Elapsed(); cudaCheckError(); hipDeviceSynchronize(); cudaCheckError(); // convolution by columns device printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid*blocksPerGrid, threadsPerBlock*threadsPerBlock); timer.Start(); hipLaunchKernelGGL(( convolutionColumnDevice), dim3(grid), dim3(threads), 0, 0, d_OutputD, d_Buffer, d_Filter, imageW, imageH, filter_radius); timer.Stop(); overal_time = overal_time + timer.Elapsed(); cudaCheckError(); hipDeviceSynchronize(); cudaCheckError(); // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); timer.Start(); hipMemcpy(h_OutputGPU, d_OutputD, imageW * imageH * sizeof(double), hipMemcpyDeviceToHost); timer.Stop(); overal_time = overal_time + timer.Elapsed(); cudaCheckError(); printf("\nComparing the outputs\n"); double max_diff=0, temp; for (unsigned i = 0; i < imageW * imageH; i++) { temp = ABS(h_OutputCPU[i] - h_OutputGPU[i]); if (max_diff < temp) { max_diff = temp; } if ( max_diff > accuracy){ printf("The accuracy is not good enough\n" ); break; } } printf("Max diff: %g\n\n", max_diff); printf("Time elapsed = %g ms\n", overal_time); overal_CPU_time = (double)(end - start) * 1000.0 / CLOCKS_PER_SEC ; printf ("Time elapsed on CPU = %g ms\n", overal_CPU_time); // free all the allocated memory free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Filter); hipFree(d_OutputD); cudaCheckError(); hipFree(d_Buffer); cudaCheckError(); hipFree(d_Input); cudaCheckError(); hipFree(d_Filter); cudaCheckError(); // Do a device reset just in case... Bgalte to sxolio otan ylopoihsete CUDA hipDeviceReset(); return 0; }
40e7ee4806f2e1ba9d9bc3068127504ac7cfa342.cu
/* * This sample implements a separable convolution * of a 2D image with an arbitrary filter. */ #include <stdio.h> #include <stdlib.h> #include "gputimer.h" #include <time.h> unsigned int filter_radius; GpuTimer timer; float overal_time = 0; clock_t start, end; double overal_CPU_time; #define FILTER_LENGTH (2 * filter_radius + 1) #define ABS(val) ((val)<0.0 ? (-(val)) : (val)) #define accuracy 0.00005 #define cudaCheckError() { \ cudaError_t e=cudaGetLastError(); \ if(e!=cudaSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } //////////////////////////////////////////////////////////////////////////////// // Reference row convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionRowCPU(double *h_Dst, double *h_Src, double *h_Filter,int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { double sum = 0; for (k = -filterR; k <= filterR; k++) { int d = x + k; if (d >= 0 && d < imageW) { sum += h_Src[y * imageW + d] * h_Filter[filterR - k]; } h_Dst[y * imageW + x] = sum; } } } } //////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU(double *h_Dst, double *h_Src, double *h_Filter,int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { double sum = 0; for (k = -filterR; k <= filterR; k++) { int d = y + k; if (d >= 0 && d < imageH) { sum += h_Src[d * imageW + x] * h_Filter[filterR - k]; } h_Dst[y * imageW + x] = sum; } } } } //////////////////////////////////////////////////////////////////////////////// // Device code //////////////////////////////////////////////////////////////////////////////// __global__ void convolutionRowDevice(double *d_Dst, double *d_Src, double *d_Filter,int imageW, int imageH, int filterR) { int k; int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; double sum = 0; for (k = -filterR; k <= filterR; k++) { int d = row + k; if (d >= 0 && d < imageW) { //sum += h_Src[y * imageW + d] * h_Filter[filterR - k]; sum += d_Src[col * imageW + d] * d_Filter[filterR - k]; } //h_Dst[y * imageW + x] = sum; d_Dst[col * imageW + row] = sum; } } __global__ void convolutionColumnDevice(double *d_Dst, double *d_Src, double *d_Filter,int imageW, int imageH, int filterR) { int k; int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; double sum = 0; for (k = -filterR; k <= filterR; k++) { int d = col + k; if (d >= 0 && d < imageH) { //sum += h_Src[d * imageW + x] * h_Filter[filterR - k]; sum += d_Src[d * imageW + row] * d_Filter[filterR -k]; } //h_Dst[y * imageW + x] = sum; d_Dst[col * imageW + row] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { double *h_Filter, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; double *d_Filter, *d_Input, *d_Buffer, *d_OutputD; int imageW; int imageH; unsigned int N; unsigned int i; // Ta imageW, imageH ta dinei o xrhsths kai thewroume oti einai isa, // dhladh imageW = imageH = N, opou to N to dinei o xrhsths. // Gia aplothta thewroume tetragwnikes eikones. 
if ( argc != 3){ printf("Missmach in argument input \n"); printf("1st argument: Image Size \n 2nd argument: Filter Radius \n"); return 0; } filter_radius = atoi(argv[1]); N = atoi(argv[2]); imageH = N; imageW = N; if ( N < FILTER_LENGTH || N%2 != 0 ){ printf ( "Wrong image size \n"); printf ( "It should be greater than %d and a power of 2 \n", FILTER_LENGTH); return 0; } printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating host arrays...\n"); // Tha htan kalh idea na elegxete kai to apotelesma twn malloc... // Host mallocs h_Filter = (double *)malloc(FILTER_LENGTH * sizeof(double)); h_Input = (double *)malloc(imageW * imageH * sizeof(double)); h_Buffer = (double *)malloc(imageW * imageH * sizeof(double)); h_OutputCPU = (double *)malloc(imageW * imageH * sizeof(double)); h_OutputGPU = (double *)malloc(imageW * imageH * sizeof(double)); if ( h_Filter == NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU == NULL || h_OutputGPU == NULL) { fprintf(stderr, "Failed to allocate Host matrices!\n"); exit(EXIT_FAILURE); } printf("Allocating Device arrays...\n"); // Device mallocs d_Filter = NULL; cudaMalloc((void **)&d_Filter, FILTER_LENGTH * sizeof(double)); cudaCheckError(); d_Input = NULL; cudaMalloc((void **)&d_Input, imageW * imageH * sizeof(double)); cudaCheckError(); d_Buffer = NULL; cudaMalloc((void **)&d_Buffer, imageW * imageH * sizeof(double)); cudaCheckError(); d_OutputD = NULL; cudaMalloc((void **)&d_OutputD, imageW * imageH * sizeof(double)); cudaCheckError(); // to 'h_Filter' apotelei to filtro me to opoio ginetai to convolution kai // arxikopoieitai tuxaia. To 'h_Input' einai h eikona panw sthn opoia ginetai // to convolution kai arxikopoieitai kai auth tuxaia. printf("Initializing Host arrays...\n"); srand(200); for (i = 0; i < FILTER_LENGTH; i++) { h_Filter[i] = (double)(rand() % 16); } for (i = 0; i < imageW * imageH; i++) { h_Input[i] = (double)rand() / ((double)RAND_MAX / 255) + (double)rand() / (double)RAND_MAX; } printf("Initializing Device arrays...\n"); // Transfer Data to Device timer.Start(); cudaMemcpy(d_Filter, h_Filter, FILTER_LENGTH * sizeof(double), cudaMemcpyHostToDevice); timer.Stop(); overal_time = overal_time + timer.Elapsed(); cudaCheckError(); timer.Start(); cudaMemcpy(d_Input, h_Input, imageW * imageH * sizeof(double), cudaMemcpyHostToDevice); timer.Stop(); overal_time = overal_time + timer.Elapsed(); cudaCheckError(); // To parakatw einai to kommati pou ekteleitai sthn CPU kai me vash auto prepei na ginei h sugrish me thn GPU. 
printf("CPU computation...\n"); start = clock(); convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // convolution kata grammes convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // convolution kata sthles end = clock(); // Kanete h sugrish anamesa se GPU kai CPU kai an estw kai kapoio apotelesma xeperna thn akriveia // pou exoume orisei, tote exoume sfalma kai mporoume endexomenws na termatisoume to programma mas printf("GPU computation...\n"); // Kernel paramiters prep int threadsPerBlock; if (N >= 32){ threadsPerBlock = 32; }else{ threadsPerBlock = N; } dim3 threads(threadsPerBlock, threadsPerBlock); int blocksPerGrid; if ( N>=32){ blocksPerGrid = N/threads.x; }else{ blocksPerGrid = 1; } dim3 grid(blocksPerGrid,blocksPerGrid); // convolution by rows device printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid*blocksPerGrid, threadsPerBlock*threadsPerBlock); timer.Start(); convolutionRowDevice<<<grid, threads>>>(d_Buffer, d_Input, d_Filter, imageW, imageH, filter_radius); timer.Stop(); overal_time = overal_time + timer.Elapsed(); cudaCheckError(); cudaDeviceSynchronize(); cudaCheckError(); // convolution by columns device printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid*blocksPerGrid, threadsPerBlock*threadsPerBlock); timer.Start(); convolutionColumnDevice<<<grid, threads>>>(d_OutputD, d_Buffer, d_Filter, imageW, imageH, filter_radius); timer.Stop(); overal_time = overal_time + timer.Elapsed(); cudaCheckError(); cudaDeviceSynchronize(); cudaCheckError(); // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); timer.Start(); cudaMemcpy(h_OutputGPU, d_OutputD, imageW * imageH * sizeof(double), cudaMemcpyDeviceToHost); timer.Stop(); overal_time = overal_time + timer.Elapsed(); cudaCheckError(); printf("\nComparing the outputs\n"); double max_diff=0, temp; for (unsigned i = 0; i < imageW * imageH; i++) { temp = ABS(h_OutputCPU[i] - h_OutputGPU[i]); if (max_diff < temp) { max_diff = temp; } if ( max_diff > accuracy){ printf("The accuracy is not good enough\n" ); break; } } printf("Max diff: %g\n\n", max_diff); printf("Time elapsed = %g ms\n", overal_time); overal_CPU_time = (double)(end - start) * 1000.0 / CLOCKS_PER_SEC ; printf ("Time elapsed on CPU = %g ms\n", overal_CPU_time); // free all the allocated memory free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Filter); cudaFree(d_OutputD); cudaCheckError(); cudaFree(d_Buffer); cudaCheckError(); cudaFree(d_Input); cudaCheckError(); cudaFree(d_Filter); cudaCheckError(); // Do a device reset just in case... Bgalte to sxolio otan ylopoihsete CUDA cudaDeviceReset(); return 0; }
b5fe3c30843a651c645515be7647448a0274d98f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "optimizedSortRows.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *image = NULL; hipMalloc(&image, XSIZE*YSIZE); int imageHeight = YSIZE; int imageWidth = XSIZE; int colorMode = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( optimizedSortRows), dim3(gridBlock),dim3(threadBlock), 0, 0, image,imageHeight,imageWidth,colorMode); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( optimizedSortRows), dim3(gridBlock),dim3(threadBlock), 0, 0, image,imageHeight,imageWidth,colorMode); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( optimizedSortRows), dim3(gridBlock),dim3(threadBlock), 0, 0, image,imageHeight,imageWidth,colorMode); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b5fe3c30843a651c645515be7647448a0274d98f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "optimizedSortRows.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *image = NULL; cudaMalloc(&image, XSIZE*YSIZE); int imageHeight = YSIZE; int imageWidth = XSIZE; int colorMode = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); optimizedSortRows<<<gridBlock,threadBlock>>>(image,imageHeight,imageWidth,colorMode); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { optimizedSortRows<<<gridBlock,threadBlock>>>(image,imageHeight,imageWidth,colorMode); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { optimizedSortRows<<<gridBlock,threadBlock>>>(image,imageHeight,imageWidth,colorMode); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
7c946b4bef0da05fa5260cd57f57e3e18c850a19.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/transform.hpp" #include "opencv2/core/cuda/color.hpp" #include "cvt_color_internal.h" namespace cv { namespace cuda { namespace device { OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_x = 8 }; enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr555_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr555_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr565_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr565_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_bgra_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_rgba_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_bgra_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_rgba_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr555_traits::functor_type) { enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr565_traits::functor_type) { enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_yuv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_yuv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_YCrCb4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_YCrCb4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_xyz4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_xyz4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { 
smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hsv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hsv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hls4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hls4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hls4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hls4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name, traits) \ void name(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream) \ { \ traits::functor_type functor = traits::create_functor(); \ typedef typename traits::functor_type::argument_type src_t; \ typedef typename traits::functor_type::result_type dst_t; \ cv::cuda::device::transform((PtrStepSz<src_t>)src, (PtrStepSz<dst_t>)dst, functor, WithOutMask(), stream); \ } #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(name) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name, name ## _traits) #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(name) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _16u, name ## _traits<ushort>) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(name) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(name) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _full_32f, name ## _full_traits<float>) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr565) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr555) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr565) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr555) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr565) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr555) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr565) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgra) 
OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr555) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr565) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_gray) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_gray) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_gray) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_gray) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_gray) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_gray) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgba) 
OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv4) 
OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgra) #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL }}} // namespace cv { namespace cuda { namespace cudev #endif /* CUDA_DISABLER */
7c946b4bef0da05fa5260cd57f57e3e18c850a19.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/transform.hpp" #include "opencv2/core/cuda/color.hpp" #include "cvt_color_internal.h" namespace cv { namespace cuda { namespace device { OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_x = 8 }; enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr555_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr555_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr565_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr565_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_bgra_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_rgba_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_bgra_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_rgba_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr555_traits::functor_type) { enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr565_traits::functor_type) { enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_yuv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_yuv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_YCrCb4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_YCrCb4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_xyz4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_xyz4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { 
smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hsv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hsv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hls4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hls4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hls4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hls4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name, traits) \ void name(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream) \ { \ traits::functor_type functor = traits::create_functor(); \ typedef typename traits::functor_type::argument_type src_t; \ typedef typename traits::functor_type::result_type dst_t; \ cv::cuda::device::transform((PtrStepSz<src_t>)src, (PtrStepSz<dst_t>)dst, functor, WithOutMask(), stream); \ } #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(name) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name, name ## _traits) #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(name) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _16u, name ## _traits<ushort>) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(name) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(name) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \ OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _full_32f, name ## _full_traits<float>) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr565) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr555) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr565) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr555) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr565) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr555) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr565) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgra) 
OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr555) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr565) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_gray) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_gray) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_gray) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_gray) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_gray) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_gray) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgba) 
OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv4) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv4) 
OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgb) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgba) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgr) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgra) OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgra) #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL }}} // namespace cv { namespace cuda { namespace cudev #endif /* CUDA_DISABLER */
69714810dcd9d496f7eee121c10cf5bf09ae4af3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <time.h> #define BLOCK_SIZE 256 // histogram kernel with atomic addition and privitization __global__ void hist_GPU(int* d_vec, int* d_hist, int BinNum, int VecDim) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // get the bin size int BinSize = 1024 / BinNum; // allocates a shared memory array extern __shared__ int histo_s[]; for (unsigned int binIdx = threadIdx.x; binIdx < BinNum; binIdx += blockDim.x) { histo_s[binIdx] = 0; } __syncthreads(); // implement atomic addition for (unsigned int i = tid; i < VecDim; i += blockDim.x * gridDim.x) { atomicAdd(&(histo_s[d_vec[i] / BinSize]), 1); } __syncthreads(); // commit to global memory for (unsigned int binIdx = threadIdx.x; binIdx < BinNum; binIdx += blockDim.x) { atomicAdd(&(d_hist[binIdx]), histo_s[binIdx]); } } // histogram in CPU version, to comfirm whether the result is correct void hist_CPU(int* vector, int* hist_cpu, int BinNum, int VecDim) { int BinSize = 1024 / BinNum; for (int i = 0; i < VecDim; ++i) { ++hist_cpu[vector[i] / BinSize]; } return; } // check whether the input parameters are in the proper range int check_input(int BinNum, int VecDim) { if ((BinNum & (BinNum - 1)) != 0) { printf("Invalid <BinNum> \n"); printf("<BinNum> must be 2 ^ n, and 2 < n < 8 \n"); return -1; } if (VecDim < 0) { printf("Invalid <VecDim> \n"); printf("<VecDim> must >= 0 \n"); return -1; } return 1; } int main(int argc, char* argv[]) { if (argc != 4) { printf("Input error! \n"); printf("Please input <BinNum> and <VecDim> \n"); return 0; } if (argc == 4 && (strcmp(argv[1], "-i") == 0)) { printf("Inputing Data...\n"); } else { printf("Correct input Format: ./histogram_atomic -i binNum vecNum\n"); return -1; } int BinNum = atoi(argv[2]); int VecDim = atoi(argv[3]); if (check_input(BinNum, VecDim) == 1) { printf("BinNum = %d, VecDim = %d.\n", BinNum, VecDim); } else { return -1; } // initialize vector int* vector; hipHostMalloc((void**)&vector, sizeof(int) * VecDim); // generate input vector srand((unsigned)time(NULL)); for (int i = 0; i < VecDim; i++) { vector[i] = rand() % 1024; } // allocate memory on host for saving results int* hist_cpu = (int*)calloc(VecDim, sizeof(int)); int* hist_gpu = (int*)calloc(VecDim, sizeof(int)); // allocate memory on device int* d_vec, * d_hist; hipMalloc((void**)&d_vec, sizeof(int) * VecDim); hipMalloc((void**)&d_hist, sizeof(int) * BinNum); // transfer vector from host to device hipMemcpy(d_vec, vector, sizeof(int) * VecDim, hipMemcpyHostToDevice); hipMemset(d_hist, 0, BinNum); // prepare for recording the run time float gpu_time_ms; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(128); // implement GPU version histogram and record the run time hist_GPU << < dimGrid, dimBlock, sizeof(int)* BinNum >> > (d_vec, d_hist, BinNum, VecDim); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&gpu_time_ms, start, stop); // copy the result to the host hipMemcpy(hist_gpu, d_hist, sizeof(int) * BinNum, hipMemcpyDeviceToHost); // implement the CPU version histogram hist_CPU(vector, hist_cpu, BinNum, VecDim); // validate results computed by GPU with shared memory int all_ok = 1; for (int i = 0; i < BinNum; i++) { if (hist_gpu[i] != hist_cpu[i]) { all_ok = 0; } } if (all_ok == 1) { printf("Results from GPU and CPU are matched.\n"); } 
else { printf("The result is incorrect!\n"); } // performance analysis printf("Performance: %f ms. Throughput = %.4f MB/s.\n", gpu_time_ms, 1.0e-3 * (double)VecDim / gpu_time_ms); // free memory hipFree(d_vec); hipFree(d_hist); hipHostFree(vector); hipHostFree(hist_cpu); hipHostFree(hist_gpu); return 0; }
69714810dcd9d496f7eee121c10cf5bf09ae4af3.cu
#include <stdio.h> #include <stdlib.h> #include <assert.h> #include <time.h> #define BLOCK_SIZE 256 // histogram kernel with atomic addition and privitization __global__ void hist_GPU(int* d_vec, int* d_hist, int BinNum, int VecDim) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // get the bin size int BinSize = 1024 / BinNum; // allocates a shared memory array extern __shared__ int histo_s[]; for (unsigned int binIdx = threadIdx.x; binIdx < BinNum; binIdx += blockDim.x) { histo_s[binIdx] = 0; } __syncthreads(); // implement atomic addition for (unsigned int i = tid; i < VecDim; i += blockDim.x * gridDim.x) { atomicAdd(&(histo_s[d_vec[i] / BinSize]), 1); } __syncthreads(); // commit to global memory for (unsigned int binIdx = threadIdx.x; binIdx < BinNum; binIdx += blockDim.x) { atomicAdd(&(d_hist[binIdx]), histo_s[binIdx]); } } // histogram in CPU version, to comfirm whether the result is correct void hist_CPU(int* vector, int* hist_cpu, int BinNum, int VecDim) { int BinSize = 1024 / BinNum; for (int i = 0; i < VecDim; ++i) { ++hist_cpu[vector[i] / BinSize]; } return; } // check whether the input parameters are in the proper range int check_input(int BinNum, int VecDim) { if ((BinNum & (BinNum - 1)) != 0) { printf("Invalid <BinNum> \n"); printf("<BinNum> must be 2 ^ n, and 2 < n < 8 \n"); return -1; } if (VecDim < 0) { printf("Invalid <VecDim> \n"); printf("<VecDim> must >= 0 \n"); return -1; } return 1; } int main(int argc, char* argv[]) { if (argc != 4) { printf("Input error! \n"); printf("Please input <BinNum> and <VecDim> \n"); return 0; } if (argc == 4 && (strcmp(argv[1], "-i") == 0)) { printf("Inputing Data...\n"); } else { printf("Correct input Format: ./histogram_atomic -i binNum vecNum\n"); return -1; } int BinNum = atoi(argv[2]); int VecDim = atoi(argv[3]); if (check_input(BinNum, VecDim) == 1) { printf("BinNum = %d, VecDim = %d.\n", BinNum, VecDim); } else { return -1; } // initialize vector int* vector; cudaMallocHost((void**)&vector, sizeof(int) * VecDim); // generate input vector srand((unsigned)time(NULL)); for (int i = 0; i < VecDim; i++) { vector[i] = rand() % 1024; } // allocate memory on host for saving results int* hist_cpu = (int*)calloc(VecDim, sizeof(int)); int* hist_gpu = (int*)calloc(VecDim, sizeof(int)); // allocate memory on device int* d_vec, * d_hist; cudaMalloc((void**)&d_vec, sizeof(int) * VecDim); cudaMalloc((void**)&d_hist, sizeof(int) * BinNum); // transfer vector from host to device cudaMemcpy(d_vec, vector, sizeof(int) * VecDim, cudaMemcpyHostToDevice); cudaMemset(d_hist, 0, BinNum); // prepare for recording the run time float gpu_time_ms; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(128); // implement GPU version histogram and record the run time hist_GPU << < dimGrid, dimBlock, sizeof(int)* BinNum >> > (d_vec, d_hist, BinNum, VecDim); cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&gpu_time_ms, start, stop); // copy the result to the host cudaMemcpy(hist_gpu, d_hist, sizeof(int) * BinNum, cudaMemcpyDeviceToHost); // implement the CPU version histogram hist_CPU(vector, hist_cpu, BinNum, VecDim); // validate results computed by GPU with shared memory int all_ok = 1; for (int i = 0; i < BinNum; i++) { if (hist_gpu[i] != hist_cpu[i]) { all_ok = 0; } } if (all_ok == 1) { printf("Results from GPU and CPU are matched.\n"); } else { printf("The result is incorrect!\n"); } // performance analysis 
printf("Performance: %f ms. Throughput = %.4f MB/s.\n", gpu_time_ms, 1.0e-3 * (double)VecDim / gpu_time_ms); // free memory cudaFree(d_vec); cudaFree(d_hist); cudaFreeHost(vector); cudaFreeHost(hist_cpu); cudaFreeHost(hist_gpu); return 0; }
49c432ece62f32e30d8aea4a6fa63e32d19e0e00.hip
// !!! This is a file automatically generated by hipify!!! #include "OperatorKernels.h" #include <hip/hip_runtime.h> #include <math.h> __global__ void DeviceProceedVector(double* out, const double* in, OperatorElement* oper, const unsigned int dim, const unsigned int Nfibs) { unsigned int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < dim) { out[id] = 0; OperatorElement* m_oper = oper + 25 * id; for(unsigned int ind = 0; ind < 25; ind ++) { out[id] += m_oper[ind].coe * in[m_oper[ind].ind1] * in[m_oper[ind].ind2] * in[m_oper[ind].ind3]; } } } void ProceedVector(double* out, const double* in, OperatorElement* oper, const unsigned int& dim, const unsigned int& Nfibs) { hipLaunchKernelGGL(( DeviceProceedVector), dim3(dim/128 +1), dim3(128), 0, 0, out, in, oper, dim, Nfibs); }
49c432ece62f32e30d8aea4a6fa63e32d19e0e00.cu
#include "OperatorKernels.h" #include <cuda_runtime.h> #include <math.h> __global__ void DeviceProceedVector(double* out, const double* in, OperatorElement* oper, const unsigned int dim, const unsigned int Nfibs) { unsigned int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < dim) { out[id] = 0; OperatorElement* m_oper = oper + 25 * id; for(unsigned int ind = 0; ind < 25; ind ++) { out[id] += m_oper[ind].coe * in[m_oper[ind].ind1] * in[m_oper[ind].ind2] * in[m_oper[ind].ind3]; } } } void ProceedVector(double* out, const double* in, OperatorElement* oper, const unsigned int& dim, const unsigned int& Nfibs) { DeviceProceedVector<<<dim/128 +1, 128>>>(out, in, oper, dim, Nfibs); }
ee73540daabb2ef7e0716e862e07e2e4a49348ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHDeviceUtils.cuh> #include <torch/torch.h> #include <vector> #include <iostream> namespace rcnn{ namespace layers{ __global__ void box_encode_kernel(float *targets_dx, float *targets_dy, float *targets_dw, float *targets_dh, float4 *boxes, float4 *anchors, float wx, float wy, float ww, float wh, size_t gt, size_t idxJump) { int idx = blockIdx.x * blockDim.x + threadIdx.x; size_t row_offset; float anchors_x1, anchors_x2, anchors_y1, anchors_y2, boxes_x1, boxes_x2, boxes_y1, boxes_y2, ex_w, ex_h, ex_ctr_x, ex_ctr_y, gt_w, gt_h, gt_ctr_x, gt_ctr_y; for (int i = idx; i < gt; i += idxJump){ row_offset = i; anchors_x1 = anchors[row_offset].x; anchors_y1 = anchors[row_offset].y; anchors_x2 = anchors[row_offset].z; anchors_y2 = anchors[row_offset].w; boxes_x1 = boxes[row_offset].x; boxes_y1 = boxes[row_offset].y; boxes_x2 = boxes[row_offset].z; boxes_y2 = boxes[row_offset].w; ex_w = anchors_x2 - anchors_x1 + 1; ex_h = anchors_y2 - anchors_y1 + 1; ex_ctr_x = anchors_x1 + 0.5 * ex_w; ex_ctr_y = anchors_y1 + 0.5 * ex_h; gt_w = boxes_x2 - boxes_x1 + 1; gt_h = boxes_y2 - boxes_y1 + 1; gt_ctr_x = boxes_x1 + 0.5 * gt_w; gt_ctr_y = boxes_y1 + 0.5 * gt_h; targets_dx[i] = wx * (gt_ctr_x - ex_ctr_x) / ex_w; targets_dy[i] = wy * (gt_ctr_y - ex_ctr_y) / ex_h; targets_dw[i] = ww * log(gt_w / ex_w); targets_dh[i] = wh * log(gt_h / ex_h); } } std::vector<torch::Tensor> box_encode_cuda(torch::Tensor boxes, torch::Tensor anchors, float wx, float wy, float ww, float wh){ int minGridSize; int blockSize; hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*) box_encode_kernel, 0, // dynamic memory 0); // maximum utilized threads long size = boxes.size(0); auto targets_dx = torch::ones({size}, torch::CUDA(torch::kFloat)); auto targets_dy = torch::ones({size}, torch::CUDA(torch::kFloat)); auto targets_dw = torch::ones({size}, torch::CUDA(torch::kFloat)); auto targets_dh = torch::ones({size}, torch::CUDA(torch::kFloat)); dim3 gridDim(minGridSize); dim3 blockDim(blockSize); int idxJump = minGridSize * blockSize; auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( box_encode_kernel), dim3(gridDim),dim3(blockDim),0,stream.stream(), targets_dx.data<float>(), targets_dy.data<float>(), targets_dw.data<float>(), targets_dh.data<float>(), (float4*) boxes.data<float>(), (float4*) anchors.data<float>(), wx, wy, ww, wh, size, idxJump); std::vector<torch::Tensor> result; result.push_back(targets_dx); result.push_back(targets_dy); result.push_back(targets_dw); result.push_back(targets_dh); return result; } } }
ee73540daabb2ef7e0716e862e07e2e4a49348ae.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCDeviceUtils.cuh> #include <torch/torch.h> #include <vector> #include <iostream> namespace rcnn{ namespace layers{ __global__ void box_encode_kernel(float *targets_dx, float *targets_dy, float *targets_dw, float *targets_dh, float4 *boxes, float4 *anchors, float wx, float wy, float ww, float wh, size_t gt, size_t idxJump) { int idx = blockIdx.x * blockDim.x + threadIdx.x; size_t row_offset; float anchors_x1, anchors_x2, anchors_y1, anchors_y2, boxes_x1, boxes_x2, boxes_y1, boxes_y2, ex_w, ex_h, ex_ctr_x, ex_ctr_y, gt_w, gt_h, gt_ctr_x, gt_ctr_y; for (int i = idx; i < gt; i += idxJump){ row_offset = i; anchors_x1 = anchors[row_offset].x; anchors_y1 = anchors[row_offset].y; anchors_x2 = anchors[row_offset].z; anchors_y2 = anchors[row_offset].w; boxes_x1 = boxes[row_offset].x; boxes_y1 = boxes[row_offset].y; boxes_x2 = boxes[row_offset].z; boxes_y2 = boxes[row_offset].w; ex_w = anchors_x2 - anchors_x1 + 1; ex_h = anchors_y2 - anchors_y1 + 1; ex_ctr_x = anchors_x1 + 0.5 * ex_w; ex_ctr_y = anchors_y1 + 0.5 * ex_h; gt_w = boxes_x2 - boxes_x1 + 1; gt_h = boxes_y2 - boxes_y1 + 1; gt_ctr_x = boxes_x1 + 0.5 * gt_w; gt_ctr_y = boxes_y1 + 0.5 * gt_h; targets_dx[i] = wx * (gt_ctr_x - ex_ctr_x) / ex_w; targets_dy[i] = wy * (gt_ctr_y - ex_ctr_y) / ex_h; targets_dw[i] = ww * log(gt_w / ex_w); targets_dh[i] = wh * log(gt_h / ex_h); } } std::vector<torch::Tensor> box_encode_cuda(torch::Tensor boxes, torch::Tensor anchors, float wx, float wy, float ww, float wh){ int minGridSize; int blockSize; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*) box_encode_kernel, 0, // dynamic memory 0); // maximum utilized threads long size = boxes.size(0); auto targets_dx = torch::ones({size}, torch::CUDA(torch::kFloat)); auto targets_dy = torch::ones({size}, torch::CUDA(torch::kFloat)); auto targets_dw = torch::ones({size}, torch::CUDA(torch::kFloat)); auto targets_dh = torch::ones({size}, torch::CUDA(torch::kFloat)); dim3 gridDim(minGridSize); dim3 blockDim(blockSize); int idxJump = minGridSize * blockSize; auto stream = at::cuda::getCurrentCUDAStream(); box_encode_kernel<<<gridDim,blockDim,0,stream.stream()>>>(targets_dx.data<float>(), targets_dy.data<float>(), targets_dw.data<float>(), targets_dh.data<float>(), (float4*) boxes.data<float>(), (float4*) anchors.data<float>(), wx, wy, ww, wh, size, idxJump); std::vector<torch::Tensor> result; result.push_back(targets_dx); result.push_back(targets_dy); result.push_back(targets_dw); result.push_back(targets_dh); return result; } } }
c556191b45bb257426f65a93786f0670935e70d1.hip
// !!! This is a file automatically generated by hipify!!! #ifndef QP_UTILITY_CU #define QP_UTILITY_CU #include "stdlib.h" #include "stdio.h" #include "math.h" #include "time.h" #include "assert.h" //#include <hip/hip_runtime.h> //#include <cutil.h> #include "QP_Utility.cuh" #include <helper_functions.h> //typedef int2 Record; #define RAND_RANGE(N) ((double)rand()/((double)RAND_MAX + 1)*(N)) int seeded = 0; unsigned int seedValue; void seed_generator(unsigned int seed) { srand(seed); seedValue = seed; seeded = 1; } void check_seed() { if(!seeded) { seedValue = time(NULL); srand(seedValue); seeded = 1; } } void knuth_shuffle(Record *relation, int num_tuples) { int i; for (i = num_tuples-1; i>0; i--) { int j = RAND_RANGE(i); int k_tmp = relation[i].y; relation[i].y = relation[j].y; relation[j].y = k_tmp; } } void random_unique_gen(Record *rel, int num_tuples) { int i; for (i = 0; i < num_tuples; i++) rel[i].x = rel[i].y = (i+1); knuth_shuffle(rel, num_tuples); } int create_relation_pk(Record *relation, int num_tuples) { check_seed(); random_unique_gen(relation, num_tuples); return 0; } int compare (const void * a, const void * b) { return ( ((Record*)a)->y - ((Record*)b)->y ); } void randomize(Record *R, int rLen, int times) { int i=0; int temp=0; int from=0; int to=0; srand(times); const int offset=(1<<15)-1; for(i=0;i<times;i++) { from=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%rLen; to=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%rLen; temp=R[from].y; R[from].y=R[to].y; R[to].y=temp; } } void int_randomize(int *R, int rLen, int times) { int i=0; int temp=0; int from=0; int to=0; srand(times); const int offset=(1<<15)-1; for(i=0;i<times;i++) { from=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%rLen; to=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%rLen; temp=R[from]; R[from]=R[to]; R[to]=temp; } } /************************************************************************/ /* This function generates <rLen> random tuples; maybe duplicated. /************************************************************************/ void generateRand(Record *R, int maxmax, int rLen, int seed) { int i=0; const int offset=(1<<15)-1; srand(seed); for(i=0;i<rLen;i++) { R[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%maxmax; //R[i].x=i+1; R[i].x=i; } } void generateRand1(Record *R, int maxmax, int rLen, int seed) { int i=0; const int offset=(1<<15)-1; srand(seed); for(i=0;i<rLen;i++) { R[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%maxmax; //R[i].x=i+1; R[i].x=i; } } /************************************************************************/ /* This function generates <rLen> random tuples; maybe duplicated. /************************************************************************/ void generateRandInt(int *R, int max, int rLen, int seed) { int i=0; const int offset=(1<<15)-1; srand(seed); for(i=0;i<rLen;i++) { R[i]=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; } } //generate the each value for <dup> tuples. //dup=1,2,4,8,16,32 void generateSkewDuplicates(Record *R, int rLen,Record *S, int sLen, int max, int dup, int seed) { int a=0; int i=0; int minmin=0; int maxmax=2; unsigned int mask=(2<<15)-1; int seg=rLen/dup; srand(seed); for(i=0;i<seg;i++) { R[i].y=((((rand()& mask)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; if(i==0) { minmin=maxmax=R[i].y; } else { if(minmin>R[i].y) minmin=R[i].y; if(maxmax<R[i].y) maxmax=R[i].y; } R[i].x=i+1; } //copy the seg to all other segs. 
for(a=1;a<dup;a++) { for(i=0;i<seg;i++) R[a*seg+i].y=R[i].y; } const int offset=(1<<15)-1; for(i=0;i<sLen;i++) { S[i].x=i+1; S[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; } //cout<<"min, "<<minmin<<", max, "<<maxmax<<", rand max, "<<max<<", dup, "<<dup<<endl; #ifdef DEBUG_SAVEN printf("Be careful!!! DEBUGGING IS ENABLED\n"); qsort(R,rLen,sizeof(Record),compare); qsort(S,sLen,sizeof(Record),compare); #endif } void generateJoinSelectivity(Record *R, int rLen, Record *S, int sLen, int max, float joinSel,int seed) { int i=0; const int offset=(1<<15)-1; srand(seed); for(i=0;i<rLen;i++) { R[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; R[i].x=i+1; } for(i=0;i<sLen;i++) { S[i].x=-1; S[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; } int locR=0; int locS=0; int retry=0; const int MAX_RET=1024; double deltaSel=(double)(rLen)/(double)max/1.25; joinSel-=(float)deltaSel; printf("%f,%f,",deltaSel,joinSel); if(joinSel<0) { joinSel=0-joinSel; int numMisses=(int)(joinSel*(float)sLen); for(i=0;i<numMisses;i++) { locR=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%rLen; locS=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%sLen; if(S[locS].x==-1) { S[locS].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; S[locS].x=1; retry=0; } else { retry++; i--; if(retry>MAX_RET) break; } } } else { int numHits=(int)(joinSel*(float)sLen); for(i=0;i<numHits;i++) { locR=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%rLen; locS=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%sLen; if(S[locS].x==-1) { S[locS].y=R[locR].y; S[locS].x=1; retry=0; } else { retry++; i--; if(retry>MAX_RET) break; } } } for(i=0;i<sLen;i++) { S[i].x=i+1; } //for testing #ifdef DEBUG_SAVEN printf("Be careful!!! DEBUGGING IS ENABLED\n"); qsort(R,rLen,sizeof(Record),compare); qsort(S,sLen,sizeof(Record),compare); #endif } void generateArray(int *R, int base, int step, int max, int rLen, int seed) { int i=0; const int offset=(1<<15)-1; srand(seed); for(i=0;i<rLen;i++) { R[i*step+base]=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; } } /* * generate <rLen> sorted Record, in ascending order. */ void generateSort(Record *R, int maxmax, int rLen, int seed) { int i=0; const int offset=(1<<15)-1; srand(seed); for(i=0;i<rLen;i++) { R[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%maxmax; } qsort(R,rLen,sizeof(Record),compare); for(i=0;i<rLen;i++) R[i].x=i; } /************************************************************************/ /* This function generates <rLen> distinct tuples; distinct. 
/************************************************************************/ /* (1) generate N^0.5 16-bit distinct numbers (stored in array a); (2) generate another N^0.5 16-bit distinct numbers (stored in array b); (3) the result array, x: x[i*N^0.5+j] =(a[i]<<16)b[j] /************************************************************************/ //step (1) and (2) void generate16Bits(int *a, int max, int len, int seed) { const int mask=(1<<16)-1; int i=0; int j=0; int temp=0; srand(seed); for(i=0;i<len;i++) { temp=(((rand()<<1)+(rand()&1))&mask)%max; for(j=0;j<i;j++) if(temp==a[j]) break; if(j==i) a[i]=temp; else i--; } //for(i=0;i<len;i++) // printf("%d,",a[i]); //printf("\n"); } void generateDistinct(Record *R, int max, int rLen, int seed) { int i=0; int j=0; int curNum=0; int done=0; int nSquareRoot=(int)sqrt((double)rLen)+1; int *a=(int *)malloc(sizeof(int)*nSquareRoot); int *b=(int *)malloc(sizeof(int)*nSquareRoot); int maxSqrt=((int)sqrt((double)max)+1); generate16Bits(a,maxSqrt,nSquareRoot,seed); generate16Bits(b,maxSqrt,nSquareRoot,seed+1); for(i=0;i<nSquareRoot && !done;i++) for(j=0;j<nSquareRoot;j++) { R[curNum].y=(a[i]*maxSqrt)+b[j]; R[curNum].x=curNum; curNum++; if(curNum==rLen) { done=1; break; } } free(a); free(b); } void print(Record *R, int rLen) { int i=0; printf("Random max=%d\n",RAND_MAX); for(i=0;i<rLen;i++) { printf("%d,%d\n",R[i].x, R[i].y); } } void generateSkew(Record *R, int max, int rLen, float oneRatio, int seed) { int numOnes=(int)(((float)rLen)*oneRatio); int i=0; int onePos=0; const int offset=(1<<15)-1; srand(seed); for(i=0;i<rLen;i++) { R[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; R[i].x=i; if(R[i].y==1) numOnes--; } for(i=0;i<numOnes;i++) { onePos=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%rLen; R[onePos].x=onePos; R[onePos].y=1; } /*int numOnes=(int)((double)rLen*oneRatio); int i=0; for(i=0;i<numOnes;i++) { R[i].y=1; R[i].x=i; } const int offset=(1<<15)-1; srand(seed); for(;i<rLen;i++) { R[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; R[i].x=i; } //randomize the array randomize(R, rLen, numOnes);*/ //randomize the array //randomize(R, rLen, numOnes); } /* */ int computeMatrix(float3 *inputList, int n, int nonZero) { char fileName[100]; sprintf(fileName, "M%d.txt",n); FILE *src = fopen(fileName, "r"); if(src!=NULL) { //load the data from the file. printf("loading data from file, %s, ", fileName); int rLen=0; int a, b; float c; while (!feof(src)) { fscanf (src, "%d", &a); if(feof(src)) break; fscanf (src, "%d", &b); if(feof(src)) break; fscanf (src, "%f", &c); if(feof(src)) break; inputList[rLen].x=(float)a; inputList[rLen].y=(float)b; inputList[rLen].z=c; rLen++; } fclose(src); return rLen; } else { //fclose(src); float** A=(float **)malloc(sizeof(float *)*n); for(int i=0;i<n;i++) A[i]=(float *)malloc(sizeof(float)*n); printf("create %s", fileName); float *w=(float *)malloc(sizeof(float)*n); w[0]=0.1; w[n-1]=1.0; float q=(float)pow((float)10.0, (float)1.0/(float)n); int i=0; for(i=1;i<n-1;i++) w[i]=w[i-1]*q; float *x=(float*)malloc(sizeof(float)*n); int j=0,m=0; int tempIndex; for(j=0;j<n;j++) for(m=0;m<n;m++) A[j][m]=0; int tempValue=0; srand(0); for(i=0;i<n;i++)//the main loop { for(j=0;j<n;j++) x[j]=0; for(j=0;j<nonZero;j++) { tempIndex=rand()%n; tempValue=rand()%((1<<16)-1); x[tempIndex]=(float)tempValue/(float)((1<<16)-1); } x[i]=0.5; //compute xTx and add it to the A. 
for(j=0;j<n;j++) for(m=0;m<n;m++) { A[j][m]+=w[i]*x[j]*x[m]; } } for(i=0;i<n;i++) A[i][i]+=(float)0.1; //count the number of zeros; FILE *src2 = fopen(fileName, "w"); assert(src2); int numNonZeros=0; for(j=0;j<n;j++) for(m=0;m<n;m++) { if(A[j][m]!=0) { fprintf(src2, "%d\n",j); fprintf(src2, "%d\n",m); fprintf(src2, "%f\n",A[j][m]); inputList[numNonZeros].x=j; inputList[numNonZeros].y=m; inputList[numNonZeros].z=A[j][m]; numNonZeros++; } } fclose(src2); printf("numNonZero, %d\n",numNonZeros); //write the matrix to a file. free(x); free(w); free(A); return numNonZeros; } } /************************************************************************/ /* Timing /************************************************************************/ /*static clock_t g_startTime; void startTime() { g_startTime= clock(); } double endTime(char *info) { double cpuTime; clock_t end = clock(); cpuTime= (end-g_startTime)/ (double)CLOCKS_PER_SEC; printf("%s, time, %.3f\n", info, cpuTime); return cpuTime; }*/ StopWatchInterface* g_startTime; void startTime() { CUT_SAFE_CALL( sdkCreateTimer( &g_startTime)); CUT_SAFE_CALL( sdkStartTimer( &g_startTime)); } double endTime(char *info) { hipDeviceSynchronize(); CUT_SAFE_CALL( sdkStopTimer( &g_startTime)); double result=(double)sdkGetTimerValue(&g_startTime); printf("***%s, time, %f, ms***\n", info, result); CUT_SAFE_CALL( sdkDeleteTimer( &g_startTime)); return result; } void startTimer(StopWatchInterface **timer) { CUT_SAFE_CALL( sdkCreateTimer( timer)); CUT_SAFE_CALL( sdkStartTimer( timer)); } double endTimer(char *info, StopWatchInterface **timer) { hipDeviceSynchronize(); CUT_SAFE_CALL( sdkStopTimer( timer)); double result=sdkGetTimerValue(timer); printf("***%s costs, %f, ms***\n", info, result); CUT_SAFE_CALL( sdkDeleteTimer( timer)); return result; } int log2(int value) { int result=0; while(value>1) { value=value>>1; result++; } return result; } int log2Ceil(int value) { int result=log2(value); if(value>(1<<result)) result++; return result; } static clock_t g_startSumTime; static double g_totalTime; void startSumTime() { g_startSumTime= clock(); } void endSumTime() { hipDeviceSynchronize(); double cpuTime; clock_t end = clock(); cpuTime= (end-g_startSumTime)/ (double)CLOCKS_PER_SEC; g_totalTime+=cpuTime; } double printSumTime(char *info) { double cpuTime; clock_t end = clock(); cpuTime= (end-g_startSumTime)/ (double)CLOCKS_PER_SEC; g_totalTime+=cpuTime; printf("***%s costs, %f, ms***\n", info, g_totalTime); g_totalTime=0; return cpuTime; } #endif
c556191b45bb257426f65a93786f0670935e70d1.cu
#ifndef QP_UTILITY_CU #define QP_UTILITY_CU #include "stdlib.h" #include "stdio.h" #include "math.h" #include "time.h" #include "assert.h" //#include <cuda_runtime.h> //#include <cutil.h> #include "QP_Utility.cuh" #include <helper_functions.h> //typedef int2 Record; #define RAND_RANGE(N) ((double)rand()/((double)RAND_MAX + 1)*(N)) int seeded = 0; unsigned int seedValue; void seed_generator(unsigned int seed) { srand(seed); seedValue = seed; seeded = 1; } void check_seed() { if(!seeded) { seedValue = time(NULL); srand(seedValue); seeded = 1; } } void knuth_shuffle(Record *relation, int num_tuples) { int i; for (i = num_tuples-1; i>0; i--) { int j = RAND_RANGE(i); int k_tmp = relation[i].y; relation[i].y = relation[j].y; relation[j].y = k_tmp; } } void random_unique_gen(Record *rel, int num_tuples) { int i; for (i = 0; i < num_tuples; i++) rel[i].x = rel[i].y = (i+1); knuth_shuffle(rel, num_tuples); } int create_relation_pk(Record *relation, int num_tuples) { check_seed(); random_unique_gen(relation, num_tuples); return 0; } int compare (const void * a, const void * b) { return ( ((Record*)a)->y - ((Record*)b)->y ); } void randomize(Record *R, int rLen, int times) { int i=0; int temp=0; int from=0; int to=0; srand(times); const int offset=(1<<15)-1; for(i=0;i<times;i++) { from=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%rLen; to=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%rLen; temp=R[from].y; R[from].y=R[to].y; R[to].y=temp; } } void int_randomize(int *R, int rLen, int times) { int i=0; int temp=0; int from=0; int to=0; srand(times); const int offset=(1<<15)-1; for(i=0;i<times;i++) { from=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%rLen; to=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%rLen; temp=R[from]; R[from]=R[to]; R[to]=temp; } } /************************************************************************/ /* This function generates <rLen> random tuples; maybe duplicated. /************************************************************************/ void generateRand(Record *R, int maxmax, int rLen, int seed) { int i=0; const int offset=(1<<15)-1; srand(seed); for(i=0;i<rLen;i++) { R[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%maxmax; //R[i].x=i+1; R[i].x=i; } } void generateRand1(Record *R, int maxmax, int rLen, int seed) { int i=0; const int offset=(1<<15)-1; srand(seed); for(i=0;i<rLen;i++) { R[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%maxmax; //R[i].x=i+1; R[i].x=i; } } /************************************************************************/ /* This function generates <rLen> random tuples; maybe duplicated. /************************************************************************/ void generateRandInt(int *R, int max, int rLen, int seed) { int i=0; const int offset=(1<<15)-1; srand(seed); for(i=0;i<rLen;i++) { R[i]=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; } } //generate the each value for <dup> tuples. //dup=1,2,4,8,16,32 void generateSkewDuplicates(Record *R, int rLen,Record *S, int sLen, int max, int dup, int seed) { int a=0; int i=0; int minmin=0; int maxmax=2; unsigned int mask=(2<<15)-1; int seg=rLen/dup; srand(seed); for(i=0;i<seg;i++) { R[i].y=((((rand()& mask)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; if(i==0) { minmin=maxmax=R[i].y; } else { if(minmin>R[i].y) minmin=R[i].y; if(maxmax<R[i].y) maxmax=R[i].y; } R[i].x=i+1; } //copy the seg to all other segs. 
for(a=1;a<dup;a++) { for(i=0;i<seg;i++) R[a*seg+i].y=R[i].y; } const int offset=(1<<15)-1; for(i=0;i<sLen;i++) { S[i].x=i+1; S[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; } //cout<<"min, "<<minmin<<", max, "<<maxmax<<", rand max, "<<max<<", dup, "<<dup<<endl; #ifdef DEBUG_SAVEN printf("Be careful!!! DEBUGGING IS ENABLED\n"); qsort(R,rLen,sizeof(Record),compare); qsort(S,sLen,sizeof(Record),compare); #endif } void generateJoinSelectivity(Record *R, int rLen, Record *S, int sLen, int max, float joinSel,int seed) { int i=0; const int offset=(1<<15)-1; srand(seed); for(i=0;i<rLen;i++) { R[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; R[i].x=i+1; } for(i=0;i<sLen;i++) { S[i].x=-1; S[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; } int locR=0; int locS=0; int retry=0; const int MAX_RET=1024; double deltaSel=(double)(rLen)/(double)max/1.25; joinSel-=(float)deltaSel; printf("%f,%f,",deltaSel,joinSel); if(joinSel<0) { joinSel=0-joinSel; int numMisses=(int)(joinSel*(float)sLen); for(i=0;i<numMisses;i++) { locR=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%rLen; locS=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%sLen; if(S[locS].x==-1) { S[locS].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; S[locS].x=1; retry=0; } else { retry++; i--; if(retry>MAX_RET) break; } } } else { int numHits=(int)(joinSel*(float)sLen); for(i=0;i<numHits;i++) { locR=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%rLen; locS=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%sLen; if(S[locS].x==-1) { S[locS].y=R[locR].y; S[locS].x=1; retry=0; } else { retry++; i--; if(retry>MAX_RET) break; } } } for(i=0;i<sLen;i++) { S[i].x=i+1; } //for testing #ifdef DEBUG_SAVEN printf("Be careful!!! DEBUGGING IS ENABLED\n"); qsort(R,rLen,sizeof(Record),compare); qsort(S,sLen,sizeof(Record),compare); #endif } void generateArray(int *R, int base, int step, int max, int rLen, int seed) { int i=0; const int offset=(1<<15)-1; srand(seed); for(i=0;i<rLen;i++) { R[i*step+base]=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; } } /* * generate <rLen> sorted Record, in ascending order. */ void generateSort(Record *R, int maxmax, int rLen, int seed) { int i=0; const int offset=(1<<15)-1; srand(seed); for(i=0;i<rLen;i++) { R[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%maxmax; } qsort(R,rLen,sizeof(Record),compare); for(i=0;i<rLen;i++) R[i].x=i; } /************************************************************************/ /* This function generates <rLen> distinct tuples; distinct. 
/************************************************************************/ /* (1) generate N^0.5 16-bit distinct numbers (stored in array a); (2) generate another N^0.5 16-bit distinct numbers (stored in array b); (3) the result array, x: x[i*N^0.5+j] =(a[i]<<16)£ęb[j] /************************************************************************/ //step (1) and (2) void generate16Bits(int *a, int max, int len, int seed) { const int mask=(1<<16)-1; int i=0; int j=0; int temp=0; srand(seed); for(i=0;i<len;i++) { temp=(((rand()<<1)+(rand()&1))&mask)%max; for(j=0;j<i;j++) if(temp==a[j]) break; if(j==i) a[i]=temp; else i--; } //for(i=0;i<len;i++) // printf("%d,",a[i]); //printf("\n"); } void generateDistinct(Record *R, int max, int rLen, int seed) { int i=0; int j=0; int curNum=0; int done=0; int nSquareRoot=(int)sqrt((double)rLen)+1; int *a=(int *)malloc(sizeof(int)*nSquareRoot); int *b=(int *)malloc(sizeof(int)*nSquareRoot); int maxSqrt=((int)sqrt((double)max)+1); generate16Bits(a,maxSqrt,nSquareRoot,seed); generate16Bits(b,maxSqrt,nSquareRoot,seed+1); for(i=0;i<nSquareRoot && !done;i++) for(j=0;j<nSquareRoot;j++) { R[curNum].y=(a[i]*maxSqrt)+b[j]; R[curNum].x=curNum; curNum++; if(curNum==rLen) { done=1; break; } } free(a); free(b); } void print(Record *R, int rLen) { int i=0; printf("Random max=%d\n",RAND_MAX); for(i=0;i<rLen;i++) { printf("%d,%d\n",R[i].x, R[i].y); } } void generateSkew(Record *R, int max, int rLen, float oneRatio, int seed) { int numOnes=(int)(((float)rLen)*oneRatio); int i=0; int onePos=0; const int offset=(1<<15)-1; srand(seed); for(i=0;i<rLen;i++) { R[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; R[i].x=i; if(R[i].y==1) numOnes--; } for(i=0;i<numOnes;i++) { onePos=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%rLen; R[onePos].x=onePos; R[onePos].y=1; } /*int numOnes=(int)((double)rLen*oneRatio); int i=0; for(i=0;i<numOnes;i++) { R[i].y=1; R[i].x=i; } const int offset=(1<<15)-1; srand(seed); for(;i<rLen;i++) { R[i].y=((((rand()& offset)<<15)+(rand()&1))+(rand()<<1)+(rand()&1))%max; R[i].x=i; } //randomize the array randomize(R, rLen, numOnes);*/ //randomize the array //randomize(R, rLen, numOnes); } /* */ int computeMatrix(float3 *inputList, int n, int nonZero) { char fileName[100]; sprintf(fileName, "M%d.txt",n); FILE *src = fopen(fileName, "r"); if(src!=NULL) { //load the data from the file. printf("loading data from file, %s, ", fileName); int rLen=0; int a, b; float c; while (!feof(src)) { fscanf (src, "%d", &a); if(feof(src)) break; fscanf (src, "%d", &b); if(feof(src)) break; fscanf (src, "%f", &c); if(feof(src)) break; inputList[rLen].x=(float)a; inputList[rLen].y=(float)b; inputList[rLen].z=c; rLen++; } fclose(src); return rLen; } else { //fclose(src); float** A=(float **)malloc(sizeof(float *)*n); for(int i=0;i<n;i++) A[i]=(float *)malloc(sizeof(float)*n); printf("create %s", fileName); float *w=(float *)malloc(sizeof(float)*n); w[0]=0.1; w[n-1]=1.0; float q=(float)pow((float)10.0, (float)1.0/(float)n); int i=0; for(i=1;i<n-1;i++) w[i]=w[i-1]*q; float *x=(float*)malloc(sizeof(float)*n); int j=0,m=0; int tempIndex; for(j=0;j<n;j++) for(m=0;m<n;m++) A[j][m]=0; int tempValue=0; srand(0); for(i=0;i<n;i++)//the main loop { for(j=0;j<n;j++) x[j]=0; for(j=0;j<nonZero;j++) { tempIndex=rand()%n; tempValue=rand()%((1<<16)-1); x[tempIndex]=(float)tempValue/(float)((1<<16)-1); } x[i]=0.5; //compute xTx and add it to the A. 
for(j=0;j<n;j++) for(m=0;m<n;m++) { A[j][m]+=w[i]*x[j]*x[m]; } } for(i=0;i<n;i++) A[i][i]+=(float)0.1; //count the number of zeros; FILE *src2 = fopen(fileName, "w"); assert(src2); int numNonZeros=0; for(j=0;j<n;j++) for(m=0;m<n;m++) { if(A[j][m]!=0) { fprintf(src2, "%d\n",j); fprintf(src2, "%d\n",m); fprintf(src2, "%f\n",A[j][m]); inputList[numNonZeros].x=j; inputList[numNonZeros].y=m; inputList[numNonZeros].z=A[j][m]; numNonZeros++; } } fclose(src2); printf("numNonZero, %d\n",numNonZeros); //write the matrix to a file. free(x); free(w); free(A); return numNonZeros; } } /************************************************************************/ /* Timing /************************************************************************/ /*static clock_t g_startTime; void startTime() { g_startTime= clock(); } double endTime(char *info) { double cpuTime; clock_t end = clock(); cpuTime= (end-g_startTime)/ (double)CLOCKS_PER_SEC; printf("%s, time, %.3f\n", info, cpuTime); return cpuTime; }*/ StopWatchInterface* g_startTime; void startTime() { CUT_SAFE_CALL( sdkCreateTimer( &g_startTime)); CUT_SAFE_CALL( sdkStartTimer( &g_startTime)); } double endTime(char *info) { cudaThreadSynchronize(); CUT_SAFE_CALL( sdkStopTimer( &g_startTime)); double result=(double)sdkGetTimerValue(&g_startTime); printf("***%s, time, %f, ms***\n", info, result); CUT_SAFE_CALL( sdkDeleteTimer( &g_startTime)); return result; } void startTimer(StopWatchInterface **timer) { CUT_SAFE_CALL( sdkCreateTimer( timer)); CUT_SAFE_CALL( sdkStartTimer( timer)); } double endTimer(char *info, StopWatchInterface **timer) { cudaThreadSynchronize(); CUT_SAFE_CALL( sdkStopTimer( timer)); double result=sdkGetTimerValue(timer); printf("***%s costs, %f, ms***\n", info, result); CUT_SAFE_CALL( sdkDeleteTimer( timer)); return result; } int log2(int value) { int result=0; while(value>1) { value=value>>1; result++; } return result; } int log2Ceil(int value) { int result=log2(value); if(value>(1<<result)) result++; return result; } static clock_t g_startSumTime; static double g_totalTime; void startSumTime() { g_startSumTime= clock(); } void endSumTime() { cudaThreadSynchronize(); double cpuTime; clock_t end = clock(); cpuTime= (end-g_startSumTime)/ (double)CLOCKS_PER_SEC; g_totalTime+=cpuTime; } double printSumTime(char *info) { double cpuTime; clock_t end = clock(); cpuTime= (end-g_startSumTime)/ (double)CLOCKS_PER_SEC; g_totalTime+=cpuTime; printf("***%s costs, %f, ms***\n", info, g_totalTime); g_totalTime=0; return cpuTime; } #endif
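// QP_Utility above measures GPU time with the old cutil/helper_functions StopWatch wrappers and
// cudaThreadSynchronize(). A minimal sketch of equivalent timing with CUDA events follows;
// work_kernel, its size, and the launch shape are stand-ins for illustration only.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void work_kernel(float *d, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) d[i] = d[i] * 0.5f + 1.0f;
}

int main() {
  const int n = 1 << 20;
  float *d;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);
  work_kernel<<<(n + 255) / 256, 256>>>(d, n);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);              // waits for the kernel, like the explicit sync above

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);  // elapsed time between the two events, in ms
  printf("kernel took %f ms\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d);
  return 0;
}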
9951465500b1647d5b64a89c038bd3e538e8211b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "hello_cpu.h"
#include "hello_gpu.h"

int main(){
    helloCPU();
    hipLaunchKernelGGL(( helloGPU), dim3(1),dim3(1), 0, 0, );
    hipDeviceSynchronize();
}
9951465500b1647d5b64a89c038bd3e538e8211b.cu
#include <iostream>
#include "hello_cpu.h"
#include "hello_gpu.h"

int main(){
    helloCPU();
    helloGPU<<<1,1>>>();
    cudaDeviceSynchronize();
}
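// The hello pair above launches helloGPU<<<1,1>>>() and synchronizes without checking for errors.
// The sketch below adds the usual launch/sync error checks; helloFromDevice is a hypothetical
// stand-in for helloGPU, whose definition (hello_gpu.h) is not shown here.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void helloFromDevice() {
  printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x);
}

int main() {
  helloFromDevice<<<1, 1>>>();
  // Launch failures surface asynchronously: check the launch itself and the following sync.
  cudaError_t launchErr = cudaGetLastError();
  cudaError_t syncErr = cudaDeviceSynchronize();
  if (launchErr != cudaSuccess || syncErr != cudaSuccess) {
    fprintf(stderr, "CUDA error: %s\n",
            cudaGetErrorString(launchErr != cudaSuccess ? launchErr : syncErr));
    return 1;
  }
  return 0;
}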
4544dd44675ba517b273787e6ad9664c50d59c24.hip
// !!! This is a file automatically generated by hipify!!! //-------------------------------------------------------------------------------- // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met : // // *Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright(c) 2019, Sergen Eren // All rights reserved. //---------------------------------------------------------------------------------- // // Version 1.0: Sergen Eren, 02/11/2019 // // File: Kernels to calculate and load the procedural sky value and cdf textures // //----------------------------------------------- #define _USE_MATH_DEFINES #include <cmath> #include <stdio.h> #include <float.h> // Cuda includes #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <device_launch_parameters.h> #include "helper_math.h" // Internal includes #include "kernel_params.h" #include "hip_noise.cuh" #define INV_2_PI 1.0f / (2.0f * M_PI) #define INV_4_PI 1.0f / (4.0f * M_PI) #define INV_PI 1.0f / M_PI typedef hiprandStatePhilox4_32_10_t Rand_state; #define rand(state) hiprand_uniform(state) extern "C" __global__ void glow(const Kernel_params kernel_params, float treshold , const int width, const int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; const unsigned int idx = y * width + x; // TODO gaussian blur and add glow effect to display buffer } extern "C" __global__ void fill_volume_buffer( float *buffer, const int3 dims, const float scale, const int noise_type) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; if (x >= dims.x || y >= dims.y || z >= dims.z) return; const unsigned int idx = x + dims.x * (y + dims.y * z); Rand_state rand_state; int seed = 123; float du = 1.0f / (float)dims.x; float dx = cudaNoise::randomFloat(482 + floor(rand(&rand_state) * 2) * 47 + seed) / (float)dims.x; float dy = cudaNoise::randomFloat(472 + floor(rand(&rand_state) * 2) * 38 + seed) / (float)dims.y; float dz = cudaNoise::randomFloat(348 + floor(rand(&rand_state) * 2) * 14 + seed) / (float)dims.z; 
float3 pos = make_float3(x+dx, y+dy, z+dz); switch (noise_type) { case(0): buffer[idx] = cudaNoise::perlinNoise(pos, scale, seed); break; case(1): buffer[idx] = cudaNoise::simplexNoise(pos, scale, seed); break; case(2): buffer[idx] = cudaNoise::worleyNoise(pos, scale, seed, 300.1f, 4, 4, 1.0f); break; case(3): buffer[idx] = cudaNoise::repeaterPerlin(pos, scale, seed, 128, 1.9f, 0.5f); break; case(4): buffer[idx] = cudaNoise::repeaterPerlinAbs(pos, scale, seed, 128, 1.9f, 0.5f); break; case(5): buffer[idx] = cudaNoise::fractalSimplex(pos, scale, seed, du, 512, 1.5f, 0.95f); break; case(6): buffer[idx] = cudaNoise::repeaterTurbulence(pos, 0.2f, scale, seed, 0.8f, 32, cudaNoise::BASIS_PERLIN, cudaNoise::BASIS_PERLIN); break; case(7): buffer[idx] = cudaNoise::cubicValue(pos, scale, seed); break; case(8): buffer[idx] = cudaNoise::spots(pos, scale, seed, 0.1f, 0, 8, 1.0f, cudaNoise::SHAPE_STEP); break; } }
4544dd44675ba517b273787e6ad9664c50d59c24.cu
//-------------------------------------------------------------------------------- // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met : // // *Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright(c) 2019, Sergen Eren // All rights reserved. //---------------------------------------------------------------------------------- // // Version 1.0: Sergen Eren, 02/11/2019 // // File: Kernels to calculate and load the procedural sky value and cdf textures // //----------------------------------------------- #define _USE_MATH_DEFINES #include <cmath> #include <stdio.h> #include <float.h> // Cuda includes #include <cuda_runtime.h> #include <curand_kernel.h> #include <device_launch_parameters.h> #include "helper_math.h" // Internal includes #include "kernel_params.h" #include "cuda_noise.cuh" #define INV_2_PI 1.0f / (2.0f * M_PI) #define INV_4_PI 1.0f / (4.0f * M_PI) #define INV_PI 1.0f / M_PI typedef curandStatePhilox4_32_10_t Rand_state; #define rand(state) curand_uniform(state) extern "C" __global__ void glow(const Kernel_params kernel_params, float treshold , const int width, const int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; const unsigned int idx = y * width + x; // TODO gaussian blur and add glow effect to display buffer } extern "C" __global__ void fill_volume_buffer( float *buffer, const int3 dims, const float scale, const int noise_type) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; if (x >= dims.x || y >= dims.y || z >= dims.z) return; const unsigned int idx = x + dims.x * (y + dims.y * z); Rand_state rand_state; int seed = 123; float du = 1.0f / (float)dims.x; float dx = cudaNoise::randomFloat(482 + floor(rand(&rand_state) * 2) * 47 + seed) / (float)dims.x; float dy = cudaNoise::randomFloat(472 + floor(rand(&rand_state) * 2) * 38 + seed) / (float)dims.y; float dz = cudaNoise::randomFloat(348 + floor(rand(&rand_state) * 2) * 14 + seed) / (float)dims.z; float3 pos = make_float3(x+dx, y+dy, z+dz); switch (noise_type) { 
case(0): buffer[idx] = cudaNoise::perlinNoise(pos, scale, seed); break; case(1): buffer[idx] = cudaNoise::simplexNoise(pos, scale, seed); break; case(2): buffer[idx] = cudaNoise::worleyNoise(pos, scale, seed, 300.1f, 4, 4, 1.0f); break; case(3): buffer[idx] = cudaNoise::repeaterPerlin(pos, scale, seed, 128, 1.9f, 0.5f); break; case(4): buffer[idx] = cudaNoise::repeaterPerlinAbs(pos, scale, seed, 128, 1.9f, 0.5f); break; case(5): buffer[idx] = cudaNoise::fractalSimplex(pos, scale, seed, du, 512, 1.5f, 0.95f); break; case(6): buffer[idx] = cudaNoise::repeaterTurbulence(pos, 0.2f, scale, seed, 0.8f, 32, cudaNoise::BASIS_PERLIN, cudaNoise::BASIS_PERLIN); break; case(7): buffer[idx] = cudaNoise::cubicValue(pos, scale, seed); break; case(8): buffer[idx] = cudaNoise::spots(pos, scale, seed, 0.1f, 0, 8, 1.0f, cudaNoise::SHAPE_STEP); break; } }
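// fill_volume_buffer above indexes a 3D volume with blockIdx/threadIdx in x, y and z, but the
// host-side launch is not part of the file. The sketch below shows one plausible way to launch
// such a kernel with a 3D grid; fill_stub, the 128^3 volume and the 8x8x8 block are assumptions
// for illustration (the real kernel also needs kernel_params.h and cuda_noise.cuh).
#include <cuda_runtime.h>

__global__ void fill_stub(float *buf, int3 dims) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int z = blockIdx.z * blockDim.z + threadIdx.z;
  if (x >= dims.x || y >= dims.y || z >= dims.z) return;
  buf[x + dims.x * (y + dims.y * z)] = 0.0f;   // same x-fastest layout as the kernel above
}

int main() {
  const int3 dims = {128, 128, 128};
  float *buf;
  cudaMalloc(&buf, sizeof(float) * dims.x * dims.y * dims.z);

  // Round the grid up so every voxel gets a thread; the kernel's bounds check discards overshoot.
  dim3 block(8, 8, 8);
  dim3 grid((dims.x + block.x - 1) / block.x,
            (dims.y + block.y - 1) / block.y,
            (dims.z + block.z - 1) / block.z);
  fill_stub<<<grid, block>>>(buf, dims);
  cudaDeviceSynchronize();
  cudaFree(buf);
  return 0;
}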
1be6448548fa144ffc1287e00a3e2fffb4488597.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernel_1D_2D.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernel_1D_2D), dim3(gridBlock),dim3(threadBlock), 0, 0, ); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernel_1D_2D), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernel_1D_2D), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
1be6448548fa144ffc1287e00a3e2fffb4488597.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernel_1D_2D.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernel_1D_2D<<<gridBlock,threadBlock>>>(); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernel_1D_2D<<<gridBlock,threadBlock>>>(); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernel_1D_2D<<<gridBlock,threadBlock>>>(); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
69a15eeeaa04c22c4813eb8b0001e3073005b73e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // @file roipooling_gpu.cu // @brief Region of interest pooling block implementation (VLDT_GPU) // @author Hakan Bilen // @author Abishek Dutta // @author Andrea Vedaldi /* Copyright (C) 2016 Hakan Bilen, Abishek Dutta, and Andrea Vedaldi. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ #include "roipooling.hpp" #include "../datacu.hpp" #include <assert.h> #include <cfloat> #include <algorithm> #include <sm_20_atomic_functions.h> /* ---------------------------------------------------------------- */ /* Helpers */ /* ---------------------------------------------------------------- */ template<typename T> struct Geom { int subdivisions[2] ; T transform[6] ; Geom(int const subdivisions[2], double const transform[6]) { this->subdivisions[0] = subdivisions[0] ; this->subdivisions[1] = subdivisions[1] ; this->transform[0] = transform[0] ; this->transform[1] = transform[1] ; this->transform[2] = transform[2] ; this->transform[3] = transform[3] ; this->transform[4] = transform[4] ; this->transform[5] = transform[5] ; } } ; struct Bounds { int image, offset, hstart, hend, wstart, wend ; bool isEmpty ; } ; template<typename T> __device__ __forceinline__ static Bounds getBounds(int outputIndex, int height, int width, int numChannels, int size, const T* rois, int numROIs, Geom<T> geom) { Bounds b ; int ph = outputIndex ; int pw = ph / geom.subdivisions[0] ; int pc = pw / geom.subdivisions[1] ; int pr = pc / numChannels ; ph %= geom.subdivisions[0] ; pw %= geom.subdivisions[1] ; pc %= numChannels ; rois += 5 * pr ; // Apply sacle and offset to each ROI coordinate. T u1_ = rois[1] ; T v1_ = rois[2] ; T u2_ = rois[3] ; T v2_ = rois[4] ; T u1 = geom.transform[0] * u1_ + geom.transform[2] * v1_ + geom.transform[4] ; T v1 = geom.transform[1] * u1_ + geom.transform[3] * v1_ + geom.transform[5] ; T u2 = geom.transform[0] * u2_ + geom.transform[2] * v2_ + geom.transform[4] ; T v2 = geom.transform[1] * u2_ + geom.transform[3] * v2_ + geom.transform[5] ; // First and last pixel of each ROI (rounded // for compatibility with the Caffe definition). 
int roi_image = (int)rois[0]; int roi_start_h = (int)round(v1) - 1 ; int roi_start_w = (int)round(u1) - 1 ; int roi_end_h = (int)round(v2) - 1 ; int roi_end_w = (int)round(u2) - 1 ; int roi_height = max(roi_end_h - roi_start_h + 1, 1) ; int roi_width = max(roi_end_w - roi_start_w + 1, 1) ; T bin_size_h = (T)roi_height / geom.subdivisions[0] ; T bin_size_w = (T)roi_width / geom.subdivisions[1] ; roi_image = min(max(roi_image - 1,0), (int)size - 1) ; b.offset = (roi_image * numChannels + pc) * (width*height) ; b.wstart = (int)floor(((T)pw) * bin_size_w) ; b.wend = (int)ceil(((T)(pw + 1)) * bin_size_w) ; b.wstart = min(max(b.wstart + roi_start_w, 0), (int)width) ; b.wend = min(max(b.wend + roi_start_w, 0), (int)width) ; b.hstart = (int)floor(((T)ph) * bin_size_h) ; b.hend = (int)ceil(((T)(ph + 1)) * bin_size_h) ; b.hstart = min(max(b.hstart + roi_start_h, 0), (int)height) ; b.hend = min(max(b.hend + roi_start_h, 0), (int)height) ; b.isEmpty = (b.hend <= b.hstart) || (b.wend <= b.wstart) ; return b ; } /* ---------------------------------------------------------------- */ /* roipooling_average_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void roipooling_average_kernel (T* output, const T* data, int height, int width, int numChannels, int size, const T* rois, int numROIs, Geom<T> geom) { int outputIndex = threadIdx.x + blockIdx.x * blockDim.x; int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs; if (outputIndex < outputVolume) { Bounds b = getBounds<T>(outputIndex, height,width,numChannels,size, rois,numROIs, geom) ; data += b.offset ; T bestValue = 0; const T coeff = ((T)1.) / (T)((b.wend-b.wstart) * (b.hend-b.hstart)); for (int w = b.wstart; w < b.wend; ++w) { for (int h = b.hstart; h < b.hend; ++h) { int index = w * height + h ; bestValue += data[index] * coeff ; } } output[outputIndex] = bestValue ; } } /* ---------------------------------------------------------------- */ /* roipooling_max_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void roipooling_max_kernel (T* output, const T* data, int height, int width, int numChannels, int size, const T* rois, int numROIs, Geom<T> geom) { int outputIndex = threadIdx.x + blockIdx.x * blockDim.x ; int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs ; if (outputIndex < outputVolume) { Bounds b = getBounds<T>(outputIndex, height,width,numChannels,size, rois,numROIs, geom) ; data += b.offset ; if (! 
b.isEmpty) { T bestValue = -FLT_MAX; for (int w = b.wstart; w < b.wend; ++w) { for (int h = b.hstart; h < b.hend; ++h) { int index = w * height + h ; bestValue = max(bestValue, data[index]) ; } } output[outputIndex] = bestValue ; } else { output[outputIndex] = 0 ; } } } /* ---------------------------------------------------------------- */ /* atomicAdd */ /* ---------------------------------------------------------------- */ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else // an implementation of atomicAdd() for double (really slow) static __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif /* ---------------------------------------------------------------- */ /* roipooling_average_backward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void roipooling_average_backward_kernel (T* derData, const T* data, int height, int width, int numChannels, int size, const T* rois, int numROIs, const T* derOutput, Geom<T> geom) { int outputIndex = threadIdx.x + blockIdx.x * blockDim.x; int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs ; if (outputIndex < outputVolume) { Bounds b = getBounds<T>(outputIndex, height,width,numChannels,size, rois,numROIs, geom) ; data += b.offset ; derData += b.offset ; const T coeff = ((T)1.) / (T)((b.wend-b.wstart)*(b.hend-b.hstart)) ; for (int h = b.hstart; h < b.hend; ++h) { for (int w = b.wstart; w < b.wend; ++w) { int index = w * height + h ; atomicAdd(derData + index, derOutput[outputIndex] * coeff) ; } } } } /* ---------------------------------------------------------------- */ /* roipooling_max_backward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void roipooling_max_backward_kernel (T* derData, const T* data, int height, int width, int numChannels, int size, const T* rois, int numROIs, const T* derOutput, Geom<T> geom) { int outputIndex = threadIdx.x + blockIdx.x * blockDim.x; int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs; if (outputIndex < outputVolume) { Bounds b = getBounds<T>(outputIndex, height,width,numChannels,size, rois,numROIs, geom) ; if (! 
b.isEmpty) { data += b.offset ; derData += b.offset ; int bestIndex = min(b.wstart,width-1) * height + min(b.hstart,height-1); T bestValue = -FLT_MAX; for (int h = b.hstart; h < b.hend; ++h) { for (int w = b.wstart; w < b.wend; ++w) { int index = w * height + h ; T value = data[index] ; if (value > bestValue) { bestValue = value ; bestIndex = index ; } } } atomicAdd(derData + bestIndex, derOutput[outputIndex]) ; } } } /* ---------------------------------------------------------------- */ /* Interface */ /* ---------------------------------------------------------------- */ namespace vl { namespace impl { template <typename type> struct roipooling_max<vl::VLDT_GPU, type> { static vl::ErrorCode forward(type* output, type const* data, size_t height, size_t width, size_t numChannels, size_t size, type const* rois, size_t numROIs, int const subdivisions[2], double const transform[6]) { int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ; hipLaunchKernelGGL(( roipooling_max_kernel<type>) , dim3(divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS)),dim3(VL_CUDA_NUM_THREADS) , 0, 0, output, data, height, width, numChannels, size, rois, numROIs, Geom<type>(subdivisions,transform)) ; hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } static vl::ErrorCode backward(type* derData, type const* data, size_t height, size_t width, size_t numChannels, size_t size, type const* rois, size_t numROIs, type const* derOutput, int const subdivisions[2], double const transform[6]) { int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ; hipLaunchKernelGGL(( roipooling_max_backward_kernel<type>) , dim3(divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, derData, data, height, width, numChannels, size, rois, numROIs, derOutput, Geom<type>(subdivisions,transform)) ; hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } } ; // roipooling_max template <typename type> struct roipooling_average<vl::VLDT_GPU, type> { static vl::ErrorCode forward(type* output, type const* data, size_t height, size_t width, size_t numChannels, size_t size, type const* rois, size_t numROIs, int const subdivisions[2], double const transform[6]) { int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ; hipLaunchKernelGGL(( roipooling_average_kernel<type>) , dim3(divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS)),dim3(VL_CUDA_NUM_THREADS) , 0, 0, output, data, height, width, numChannels, size, rois, numROIs, Geom<type>(subdivisions,transform)) ; hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } static vl::ErrorCode backward(type* derData, type const* data, // <- this is not needed for avg pooling size_t height, size_t width, size_t numChannels, size_t size, type const* rois, size_t numROIs, type const* derOutput, int const subdivisions[2], double const transform[6]) { int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ; hipLaunchKernelGGL(( roipooling_average_backward_kernel<type>) , dim3(divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, derData, data, height, width, numChannels, size, rois, numROIs, derOutput, Geom<type>(subdivisions,transform)) ; hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? 
vl::VLE_Success : vl::VLE_Cuda ; } } ; // roipooling_average } } ; // namespace vl::impl // Instantiations template struct vl::impl::roipooling_max<vl::VLDT_GPU, float> ; template struct vl::impl::roipooling_average<vl::VLDT_GPU, float> ; #ifdef ENABLE_DOUBLE template struct vl::impl::roipooling_max<vl::VLDT_GPU, double> ; template struct vl::impl::roipooling_average<vl::VLDT_GPU, double> ; #endif
69a15eeeaa04c22c4813eb8b0001e3073005b73e.cu
// @file roipooling_gpu.cu // @brief Region of interest pooling block implementation (VLDT_GPU) // @author Hakan Bilen // @author Abishek Dutta // @author Andrea Vedaldi /* Copyright (C) 2016 Hakan Bilen, Abishek Dutta, and Andrea Vedaldi. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ #include "roipooling.hpp" #include "../datacu.hpp" #include <assert.h> #include <cfloat> #include <algorithm> #include <sm_20_atomic_functions.h> /* ---------------------------------------------------------------- */ /* Helpers */ /* ---------------------------------------------------------------- */ template<typename T> struct Geom { int subdivisions[2] ; T transform[6] ; Geom(int const subdivisions[2], double const transform[6]) { this->subdivisions[0] = subdivisions[0] ; this->subdivisions[1] = subdivisions[1] ; this->transform[0] = transform[0] ; this->transform[1] = transform[1] ; this->transform[2] = transform[2] ; this->transform[3] = transform[3] ; this->transform[4] = transform[4] ; this->transform[5] = transform[5] ; } } ; struct Bounds { int image, offset, hstart, hend, wstart, wend ; bool isEmpty ; } ; template<typename T> __device__ __forceinline__ static Bounds getBounds(int outputIndex, int height, int width, int numChannels, int size, const T* rois, int numROIs, Geom<T> geom) { Bounds b ; int ph = outputIndex ; int pw = ph / geom.subdivisions[0] ; int pc = pw / geom.subdivisions[1] ; int pr = pc / numChannels ; ph %= geom.subdivisions[0] ; pw %= geom.subdivisions[1] ; pc %= numChannels ; rois += 5 * pr ; // Apply sacle and offset to each ROI coordinate. T u1_ = rois[1] ; T v1_ = rois[2] ; T u2_ = rois[3] ; T v2_ = rois[4] ; T u1 = geom.transform[0] * u1_ + geom.transform[2] * v1_ + geom.transform[4] ; T v1 = geom.transform[1] * u1_ + geom.transform[3] * v1_ + geom.transform[5] ; T u2 = geom.transform[0] * u2_ + geom.transform[2] * v2_ + geom.transform[4] ; T v2 = geom.transform[1] * u2_ + geom.transform[3] * v2_ + geom.transform[5] ; // First and last pixel of each ROI (rounded // for compatibility with the Caffe definition). 
int roi_image = (int)rois[0]; int roi_start_h = (int)round(v1) - 1 ; int roi_start_w = (int)round(u1) - 1 ; int roi_end_h = (int)round(v2) - 1 ; int roi_end_w = (int)round(u2) - 1 ; int roi_height = max(roi_end_h - roi_start_h + 1, 1) ; int roi_width = max(roi_end_w - roi_start_w + 1, 1) ; T bin_size_h = (T)roi_height / geom.subdivisions[0] ; T bin_size_w = (T)roi_width / geom.subdivisions[1] ; roi_image = min(max(roi_image - 1,0), (int)size - 1) ; b.offset = (roi_image * numChannels + pc) * (width*height) ; b.wstart = (int)floor(((T)pw) * bin_size_w) ; b.wend = (int)ceil(((T)(pw + 1)) * bin_size_w) ; b.wstart = min(max(b.wstart + roi_start_w, 0), (int)width) ; b.wend = min(max(b.wend + roi_start_w, 0), (int)width) ; b.hstart = (int)floor(((T)ph) * bin_size_h) ; b.hend = (int)ceil(((T)(ph + 1)) * bin_size_h) ; b.hstart = min(max(b.hstart + roi_start_h, 0), (int)height) ; b.hend = min(max(b.hend + roi_start_h, 0), (int)height) ; b.isEmpty = (b.hend <= b.hstart) || (b.wend <= b.wstart) ; return b ; } /* ---------------------------------------------------------------- */ /* roipooling_average_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void roipooling_average_kernel (T* output, const T* data, int height, int width, int numChannels, int size, const T* rois, int numROIs, Geom<T> geom) { int outputIndex = threadIdx.x + blockIdx.x * blockDim.x; int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs; if (outputIndex < outputVolume) { Bounds b = getBounds<T>(outputIndex, height,width,numChannels,size, rois,numROIs, geom) ; data += b.offset ; T bestValue = 0; const T coeff = ((T)1.) / (T)((b.wend-b.wstart) * (b.hend-b.hstart)); for (int w = b.wstart; w < b.wend; ++w) { for (int h = b.hstart; h < b.hend; ++h) { int index = w * height + h ; bestValue += data[index] * coeff ; } } output[outputIndex] = bestValue ; } } /* ---------------------------------------------------------------- */ /* roipooling_max_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void roipooling_max_kernel (T* output, const T* data, int height, int width, int numChannels, int size, const T* rois, int numROIs, Geom<T> geom) { int outputIndex = threadIdx.x + blockIdx.x * blockDim.x ; int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs ; if (outputIndex < outputVolume) { Bounds b = getBounds<T>(outputIndex, height,width,numChannels,size, rois,numROIs, geom) ; data += b.offset ; if (! 
b.isEmpty) { T bestValue = -FLT_MAX; for (int w = b.wstart; w < b.wend; ++w) { for (int h = b.hstart; h < b.hend; ++h) { int index = w * height + h ; bestValue = max(bestValue, data[index]) ; } } output[outputIndex] = bestValue ; } else { output[outputIndex] = 0 ; } } } /* ---------------------------------------------------------------- */ /* atomicAdd */ /* ---------------------------------------------------------------- */ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else // an implementation of atomicAdd() for double (really slow) static __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif /* ---------------------------------------------------------------- */ /* roipooling_average_backward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void roipooling_average_backward_kernel (T* derData, const T* data, int height, int width, int numChannels, int size, const T* rois, int numROIs, const T* derOutput, Geom<T> geom) { int outputIndex = threadIdx.x + blockIdx.x * blockDim.x; int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs ; if (outputIndex < outputVolume) { Bounds b = getBounds<T>(outputIndex, height,width,numChannels,size, rois,numROIs, geom) ; data += b.offset ; derData += b.offset ; const T coeff = ((T)1.) / (T)((b.wend-b.wstart)*(b.hend-b.hstart)) ; for (int h = b.hstart; h < b.hend; ++h) { for (int w = b.wstart; w < b.wend; ++w) { int index = w * height + h ; atomicAdd(derData + index, derOutput[outputIndex] * coeff) ; } } } } /* ---------------------------------------------------------------- */ /* roipooling_max_backward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void roipooling_max_backward_kernel (T* derData, const T* data, int height, int width, int numChannels, int size, const T* rois, int numROIs, const T* derOutput, Geom<T> geom) { int outputIndex = threadIdx.x + blockIdx.x * blockDim.x; int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs; if (outputIndex < outputVolume) { Bounds b = getBounds<T>(outputIndex, height,width,numChannels,size, rois,numROIs, geom) ; if (! 
b.isEmpty) { data += b.offset ; derData += b.offset ; int bestIndex = min(b.wstart,width-1) * height + min(b.hstart,height-1); T bestValue = -FLT_MAX; for (int h = b.hstart; h < b.hend; ++h) { for (int w = b.wstart; w < b.wend; ++w) { int index = w * height + h ; T value = data[index] ; if (value > bestValue) { bestValue = value ; bestIndex = index ; } } } atomicAdd(derData + bestIndex, derOutput[outputIndex]) ; } } } /* ---------------------------------------------------------------- */ /* Interface */ /* ---------------------------------------------------------------- */ namespace vl { namespace impl { template <typename type> struct roipooling_max<vl::VLDT_GPU, type> { static vl::ErrorCode forward(type* output, type const* data, size_t height, size_t width, size_t numChannels, size_t size, type const* rois, size_t numROIs, int const subdivisions[2], double const transform[6]) { int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ; roipooling_max_kernel<type> <<< divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS),VL_CUDA_NUM_THREADS >>> (output, data, height, width, numChannels, size, rois, numROIs, Geom<type>(subdivisions,transform)) ; cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } static vl::ErrorCode backward(type* derData, type const* data, size_t height, size_t width, size_t numChannels, size_t size, type const* rois, size_t numROIs, type const* derOutput, int const subdivisions[2], double const transform[6]) { int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ; roipooling_max_backward_kernel<type> <<< divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (derData, data, height, width, numChannels, size, rois, numROIs, derOutput, Geom<type>(subdivisions,transform)) ; cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } } ; // roipooling_max template <typename type> struct roipooling_average<vl::VLDT_GPU, type> { static vl::ErrorCode forward(type* output, type const* data, size_t height, size_t width, size_t numChannels, size_t size, type const* rois, size_t numROIs, int const subdivisions[2], double const transform[6]) { int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ; roipooling_average_kernel<type> <<< divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS),VL_CUDA_NUM_THREADS >>> (output, data, height, width, numChannels, size, rois, numROIs, Geom<type>(subdivisions,transform)) ; cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } static vl::ErrorCode backward(type* derData, type const* data, // <- this is not needed for avg pooling size_t height, size_t width, size_t numChannels, size_t size, type const* rois, size_t numROIs, type const* derOutput, int const subdivisions[2], double const transform[6]) { int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ; roipooling_average_backward_kernel<type> <<< divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (derData, data, height, width, numChannels, size, rois, numROIs, derOutput, Geom<type>(subdivisions,transform)) ; cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? 
vl::VLE_Success : vl::VLE_Cuda ;
    }
  } ; // roipooling_average

} } ; // namespace vl::impl

// Instantiations

template struct vl::impl::roipooling_max<vl::VLDT_GPU, float> ;
template struct vl::impl::roipooling_average<vl::VLDT_GPU, float> ;

#ifdef ENABLE_DOUBLE
template struct vl::impl::roipooling_max<vl::VLDT_GPU, double> ;
template struct vl::impl::roipooling_average<vl::VLDT_GPU, double> ;
#endif
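// ---------------------------------------------------------------------------
// Reference sketch (not part of the file above): the roipooling interface sizes
// every grid as a ceiling division of the output volume by the thread-block
// size. The stand-alone program below reproduces just that launch pattern.
// Assumptions: demoKernel is a hypothetical placeholder, and VL_CUDA_NUM_THREADS
// is taken to be 512 for this sketch; the real constant is defined in the
// library's headers.
// ---------------------------------------------------------------------------
#include <cstdio>

#define VL_CUDA_NUM_THREADS 512  // assumed value for this sketch

// Ceiling division, matching divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS) above.
__host__ __device__ inline int divideAndRoundUp(int a, int b) { return (a + b - 1) / b; }

// One thread per output element; threads past the volume exit immediately.
__global__ void demoKernel(float* out, int outputVolume) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < outputVolume) out[i] = 0.f;
}

int main() {
  // subdivisions[0] * subdivisions[1] * numChannels * numROIs, with made-up sizes.
  int outputVolume = 6 * 6 * 256 * 32;
  float* out = nullptr;
  cudaMalloc(&out, outputVolume * sizeof(float));
  demoKernel<<<divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS),
               VL_CUDA_NUM_THREADS>>>(out, outputVolume);
  cudaError_t status = cudaPeekAtLastError();  // same error check as the interface above
  printf("launch status: %s\n", cudaGetErrorString(status));
  cudaFree(out);
  return 0;
}
// ---------------------------------------------------------------------------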
ac9d17e1f0dee66a6475a95a2b8567a869376711.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> void printArr( int arr[], int n ) { int i; for ( i = 0; i < n; ++i ) printf( "%d ", arr[i] ); } __device__ int d_size; __global__ void partition (int *arr, int *arr_l, int *arr_h, int n) { int z = blockIdx.x*blockDim.x+threadIdx.x; d_size = 0; __syncthreads(); if (z<n) { int h = arr_h[z]; int l = arr_l[z]; int x = arr[h]; int i = (l - 1); int temp; for (int j = l; j <= h- 1; j++) { if (arr[j] <= x) { i++; temp = arr[i]; arr[i] = arr[j]; arr[j] = temp; } } temp = arr[i+1]; arr[i+1] = arr[h]; arr[h] = temp; int p = (i + 1); if (p-1 > l) { int ind = atomicAdd(&d_size, 1); arr_l[ind] = l; arr_h[ind] = p-1; } if ( p+1 < h ) { int ind = atomicAdd(&d_size, 1); arr_l[ind] = p+1; arr_h[ind] = h; } } } void quickSortIterative (int arr[], int l, int h) { int lstack[ h - l + 1 ], hstack[ h - l + 1]; int top = -1, *d_d, *d_l, *d_h; lstack[ ++top ] = l; hstack[ top ] = h; hipMalloc(&d_d, (h-l+1)*sizeof(int)); hipMemcpy(d_d, arr,(h-l+1)*sizeof(int),hipMemcpyHostToDevice); hipMalloc(&d_l, (h-l+1)*sizeof(int)); hipMemcpy(d_l, lstack,(h-l+1)*sizeof(int),hipMemcpyHostToDevice); hipMalloc(&d_h, (h-l+1)*sizeof(int)); hipMemcpy(d_h, hstack,(h-l+1)*sizeof(int),hipMemcpyHostToDevice); int n_t = 1; int n_b = 1; int n_i = 1; while ( n_i > 0 ) { hipLaunchKernelGGL(( partition), dim3(n_b),dim3(n_t), 0, 0, d_d, d_l, d_h, n_i); int answer; hipMemcpyFromSymbol(&answer, d_size, sizeof(int), 0, hipMemcpyDeviceToHost); if (answer < 1024) { n_t = answer; } else { n_t = 1024; n_b = answer/n_t + (answer%n_t==0?0:1); } n_i = answer; hipMemcpy(arr, d_d,(h-l+1)*sizeof(int),hipMemcpyDeviceToHost); } } int main() { int l; srand(time(NULL)); printf("Enter size"); scanf("%d",&l); int arr[l]; for (int i = 0; i<l; i++) { arr[i] = rand ()%(2*l); } int n = sizeof( arr ) / sizeof( *arr ); quickSortIterative( arr, 0, n - 1 ); // printArr( arr, n ); return 0; }
ac9d17e1f0dee66a6475a95a2b8567a869376711.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> void printArr( int arr[], int n ) { int i; for ( i = 0; i < n; ++i ) printf( "%d ", arr[i] ); } __device__ int d_size; __global__ void partition (int *arr, int *arr_l, int *arr_h, int n) { int z = blockIdx.x*blockDim.x+threadIdx.x; d_size = 0; __syncthreads(); if (z<n) { int h = arr_h[z]; int l = arr_l[z]; int x = arr[h]; int i = (l - 1); int temp; for (int j = l; j <= h- 1; j++) { if (arr[j] <= x) { i++; temp = arr[i]; arr[i] = arr[j]; arr[j] = temp; } } temp = arr[i+1]; arr[i+1] = arr[h]; arr[h] = temp; int p = (i + 1); if (p-1 > l) { int ind = atomicAdd(&d_size, 1); arr_l[ind] = l; arr_h[ind] = p-1; } if ( p+1 < h ) { int ind = atomicAdd(&d_size, 1); arr_l[ind] = p+1; arr_h[ind] = h; } } } void quickSortIterative (int arr[], int l, int h) { int lstack[ h - l + 1 ], hstack[ h - l + 1]; int top = -1, *d_d, *d_l, *d_h; lstack[ ++top ] = l; hstack[ top ] = h; cudaMalloc(&d_d, (h-l+1)*sizeof(int)); cudaMemcpy(d_d, arr,(h-l+1)*sizeof(int),cudaMemcpyHostToDevice); cudaMalloc(&d_l, (h-l+1)*sizeof(int)); cudaMemcpy(d_l, lstack,(h-l+1)*sizeof(int),cudaMemcpyHostToDevice); cudaMalloc(&d_h, (h-l+1)*sizeof(int)); cudaMemcpy(d_h, hstack,(h-l+1)*sizeof(int),cudaMemcpyHostToDevice); int n_t = 1; int n_b = 1; int n_i = 1; while ( n_i > 0 ) { partition<<<n_b,n_t>>>( d_d, d_l, d_h, n_i); int answer; cudaMemcpyFromSymbol(&answer, d_size, sizeof(int), 0, cudaMemcpyDeviceToHost); if (answer < 1024) { n_t = answer; } else { n_t = 1024; n_b = answer/n_t + (answer%n_t==0?0:1); } n_i = answer; cudaMemcpy(arr, d_d,(h-l+1)*sizeof(int),cudaMemcpyDeviceToHost); } } int main() { int l; srand(time(NULL)); printf("Enter size"); scanf("%d",&l); int arr[l]; for (int i = 0; i<l; i++) { arr[i] = rand ()%(2*l); } int n = sizeof( arr ) / sizeof( *arr ); quickSortIterative( arr, 0, n - 1 ); // printArr( arr, n ); return 0; }
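// ---------------------------------------------------------------------------
// Reference sketch (not part of the pair above): the quicksort driver reads the
// __device__ counter d_size back to the host after every partition pass with
// cudaMemcpyFromSymbol (hipMemcpyFromSymbol in the hipified twin). The reduced,
// self-contained example below shows only that symbol round-trip; bumpCounter
// and the sizes are placeholders, not the dataset's kernel.
// ---------------------------------------------------------------------------
#include <cstdio>

__device__ int d_size;  // device-global work counter, as in the partition kernel

__global__ void bumpCounter(int n) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) atomicAdd(&d_size, 1);
}

int main() {
  int zero = 0;
  cudaMemcpyToSymbol(d_size, &zero, sizeof(int));  // reset the counter before the pass
  bumpCounter<<<4, 256>>>(1000);
  int answer = 0;
  cudaMemcpyFromSymbol(&answer, d_size, sizeof(int), 0, cudaMemcpyDeviceToHost);
  printf("d_size = %d\n", answer);  // expected: 1000
  return 0;
}
// ---------------------------------------------------------------------------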
b30f7fa62f00987c6d0d852c484de17a34db5dd0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Filters // // Includes: system #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <fcntl.h> #include <stdint.h> #include <errno.h> #include <assert.h> #include <string.h> #include <sys/io.h> #include <cutil_inline.h> // Includes: local #include "bmp.h" enum {SOBEL_FILTER=1, AVERAGE_FILTER, HIGH_BOOST_FILTER}; #define CLAMP_8bit(x) max(0, min(255, (x))) char *BMPInFile = "lena.bmp"; char *BMPOutFile = "output.bmp"; char *Filter = "sobel"; int FilterMode = SOBEL_FILTER; // Functions void Cleanup(void); void ParseArguments(int, char**); void FilterWrapper(unsigned char* pImageIn, int Width, int Height, float* d_sobel); // Kernels __global__ void SobelFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height, float *g_Sobel); __global__ void AverageFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height); __global__ void HighBoostFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height, const int HIGH_BOOST_FACTOR ); /* Device Memory */ unsigned char *d_In; unsigned char *d_Out; float *d_sobel; float sobel_matrix[25]={1,2,0,-2,-1,4,8,0,-8,-4,6,12,0,-12,-6,4,8,0,-8,-4,1,2,0,-2,-1}; // Setup for kernel size const int TILE_WIDTH = 6; const int TILE_HEIGHT = 6; const int FILTER_RADIUS = 2; //const int FILTER_RADIUS = 3; const int FILTER_DIAMETER = 2 * FILTER_RADIUS + 1; const int FILTER_AREA = FILTER_DIAMETER * FILTER_DIAMETER; const int BLOCK_WIDTH = TILE_WIDTH + 2*FILTER_RADIUS; const int BLOCK_HEIGHT = TILE_HEIGHT + 2*FILTER_RADIUS; const int EDGE_VALUE_THRESHOLD = 1170; const int HIGH_BOOST_FACTOR = 10; #include "filter_kernel_5x5.cu" void BitMapRead(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char **data, unsigned char **palete) { size_t palete_size; int fd; if((fd = open(file, O_RDONLY )) < 0) FATAL("Open Source"); if(read(fd, bmp, BMP_SIZE) != BMP_SIZE) FATAL("Read BMP Header"); if(read(fd, dib, DIB_SIZE) != DIB_SIZE) FATAL("Read DIB Header"); assert(dib->bpp == 8); palete_size = bmp->offset - BMP_SIZE - DIB_SIZE; if(palete_size > 0) { *palete = (unsigned char *)malloc(palete_size); int go = read(fd, *palete, palete_size); if (go != palete_size) { FATAL("Read Palete"); } } *data = (unsigned char *)malloc(dib->image_size); if(read(fd, *data, dib->image_size) != dib->image_size) FATAL("Read Image"); close(fd); } void BitMapWrite(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char *data, unsigned char *palete) { size_t palete_size; int fd; palete_size = bmp->offset - BMP_SIZE - DIB_SIZE; if((fd = open(file, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR |S_IRGRP)) < 0) FATAL("Open Destination"); if(write(fd, bmp, BMP_SIZE) != BMP_SIZE) FATAL("Write BMP Header"); if(write(fd, dib, DIB_SIZE) != DIB_SIZE) FATAL("Write BMP Header"); if(palete_size != 0) { if(write(fd, palete, palete_size) != palete_size) FATAL("Write Palete"); } if(write(fd, data, dib->image_size) != dib->image_size) FATAL("Write Image"); close(fd); } void CPU_Sobel(unsigned char* imageIn, unsigned char* imageOut, int width, int height) { int i, j, rows, cols, startCol, endCol, startRow, endRow; const float SobelMatrix[25] = {1,2,0,-2,-1,4,8,0,-8,-4,6,12,0,-12,-6,4,8,0,-8,-4,1,2,0,-2,-1};; rows = height; cols = width; // Initialize all output pixels to zero for(i=0; i<rows; i++) { for(j=0; j<cols; j++) { imageOut[i*width + j] = 0; } } startCol = 
1; endCol = cols - 1; startRow = 1; endRow = rows - 1; // Go through all inner pizel positions for(i=startRow; i<endRow; i++) { for(j=startCol; j<endCol; j++) { // sum up the 9 values to calculate both the direction x and direction y float sumX = 0, sumY=0; for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; dy++) { for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; dx++) { float Pixel = (float)(imageIn[i*width + j + (dy * width + dx)]); sumX += Pixel * SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER + (dx+FILTER_RADIUS)]; sumY += Pixel * SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER + (dy+FILTER_RADIUS)]; } } imageOut[i*width + j] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 255 : 0; } } } // Host code int main(int argc, char** argv) { ParseArguments(argc, argv); struct bmp_header bmp; struct dib_header dib; unsigned char *palete = NULL; unsigned char *data = NULL, *out = NULL; printf("Running %s filter\n", Filter); BitMapRead(BMPInFile, &bmp, &dib, &data, &palete); out = (unsigned char *)malloc(dib.image_size); printf("Computing the CPU output\n"); printf("Image details: %d by %d = %d , imagesize = %d\n", dib.width, dib.height, dib.width * dib.height,dib.image_size); unsigned int cpu_timer = 0; cutilCheckError(cutCreateTimer(&cpu_timer)); cutilCheckError(cutStartTimer(cpu_timer)); CPU_Sobel(data, out, dib.width, dib.height); cutilCheckError(cutStopTimer(cpu_timer)); BitMapWrite("CPU_sobel.bmp", &bmp, &dib, out, palete); printf("Done with CPU output\n"); printf("CPU time: %f (ms) \n", cutGetTimerValue(cpu_timer)); cutilCheckError(cutDeleteTimer(cpu_timer)); unsigned int sobel_mtimer = 0; // Initialize the timer to zero cycles. cutilCheckError(cutCreateTimer(&sobel_mtimer)); d_sobel = (float*)malloc(25*sizeof(float)); if (d_sobel == 0) Cleanup(); printf("Allocating %d bytes for image \n", dib.image_size); cutilSafeCall( hipMalloc( (void **)&d_In, dib.image_size*sizeof(unsigned char)) ); cutilSafeCall( hipMalloc( (void **)&d_Out, dib.image_size*sizeof(unsigned char)) ); cutilSafeCall( hipMalloc( (void **)&d_sobel, 25*sizeof(float)) ); cutilCheckError(cutStartTimer(sobel_mtimer)); //Send image to host DRAM. hipMemcpy(d_In, data, dib.image_size*sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy(d_sobel, sobel_matrix, 25*sizeof(float), hipMemcpyHostToDevice); cutilCheckError(cutStopTimer(sobel_mtimer)); printf("1st Transfer %f (ms)\n", cutGetTimerValue(sobel_mtimer)); // Call Kernels FilterWrapper(data, dib.width, dib.height, d_sobel); cutilCheckError(cutStartTimer(sobel_mtimer)); // Copy image back to host hipMemcpy(out, d_Out, dib.image_size*sizeof(unsigned char), hipMemcpyDeviceToHost); cutilCheckError(cutStopTimer(sobel_mtimer)); printf("memory Transfer Time: %f (ms)\n", cutGetTimerValue(sobel_mtimer)); cutilCheckError(cutDeleteTimer(sobel_mtimer)); // Write output image BitMapWrite(BMPOutFile, &bmp, &dib, out, palete); Cleanup(); } void Cleanup(void) { cutilSafeCall( hipDeviceReset() ); exit(0); } void FilterWrapper(unsigned char* pImageIn, int Width, int Height, float* d_sobel) { // Design grid disection around tile size int gridWidth = (Width + TILE_WIDTH - 1) / TILE_WIDTH; int gridHeight = (Height + TILE_HEIGHT - 1) / TILE_HEIGHT; dim3 dimGrid(gridWidth, gridHeight); // But actually invoke larger blocks to take care of surrounding shared memory dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT); unsigned int sobel_timer = 0; // Initialize the timer to zero cycles. 
cutilCheckError(cutCreateTimer(&sobel_timer)); switch(FilterMode) { case SOBEL_FILTER: printf("Sobel Filter \n"); cutilCheckError(cutStartTimer(sobel_timer)); hipLaunchKernelGGL(( SobelFilter), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_In, d_Out, Width, Height, d_sobel); cutilCheckMsg("kernel launch failure"); cutilCheckError(cutStopTimer(sobel_timer)); printf("GPU time: %f (ms) \n", cutGetTimerValue(sobel_timer)); break; case AVERAGE_FILTER: printf("Average Filter \n"); hipLaunchKernelGGL(( AverageFilter), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_In, d_Out, Width, Height); cutilCheckMsg("kernel launch failure"); break; case HIGH_BOOST_FILTER: printf("Boost Filter \n"); hipLaunchKernelGGL(( HighBoostFilter), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_In, d_Out, Width, Height, HIGH_BOOST_FACTOR ); cutilCheckMsg("kernel launch failure"); break; } cutilSafeCall( hipDeviceSynchronize() ); } // Parse program arguments void ParseArguments(int argc, char** argv) { for (int i = 0; i < argc; ++i) { if (strcmp(argv[i], "--file") == 0 || strcmp(argv[i], "-file") == 0) { BMPInFile = argv[i+1]; i = i + 1; } if (strcmp(argv[i], "--out") == 0 || strcmp(argv[i], "-out") == 0) { BMPOutFile = argv[i+1]; i = i + 1; } if (strcmp(argv[i], "--filter") == 0 || strcmp(argv[i], "-filter") == 0) { Filter = argv[i+1]; i = i + 1; if (strcmp(Filter, "sobel") == 0) FilterMode = SOBEL_FILTER; else if (strcmp(Filter, "average") == 0) FilterMode = AVERAGE_FILTER; else if (strcmp(Filter, "boost") == 0) FilterMode = HIGH_BOOST_FILTER; } } }
b30f7fa62f00987c6d0d852c484de17a34db5dd0.cu
// // Filters // // Includes: system #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <fcntl.h> #include <stdint.h> #include <errno.h> #include <assert.h> #include <string.h> #include <sys/io.h> #include <cutil_inline.h> // Includes: local #include "bmp.h" enum {SOBEL_FILTER=1, AVERAGE_FILTER, HIGH_BOOST_FILTER}; #define CLAMP_8bit(x) max(0, min(255, (x))) char *BMPInFile = "lena.bmp"; char *BMPOutFile = "output.bmp"; char *Filter = "sobel"; int FilterMode = SOBEL_FILTER; // Functions void Cleanup(void); void ParseArguments(int, char**); void FilterWrapper(unsigned char* pImageIn, int Width, int Height, float* d_sobel); // Kernels __global__ void SobelFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height, float *g_Sobel); __global__ void AverageFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height); __global__ void HighBoostFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height, const int HIGH_BOOST_FACTOR ); /* Device Memory */ unsigned char *d_In; unsigned char *d_Out; float *d_sobel; float sobel_matrix[25]={1,2,0,-2,-1,4,8,0,-8,-4,6,12,0,-12,-6,4,8,0,-8,-4,1,2,0,-2,-1}; // Setup for kernel size const int TILE_WIDTH = 6; const int TILE_HEIGHT = 6; const int FILTER_RADIUS = 2; //const int FILTER_RADIUS = 3; const int FILTER_DIAMETER = 2 * FILTER_RADIUS + 1; const int FILTER_AREA = FILTER_DIAMETER * FILTER_DIAMETER; const int BLOCK_WIDTH = TILE_WIDTH + 2*FILTER_RADIUS; const int BLOCK_HEIGHT = TILE_HEIGHT + 2*FILTER_RADIUS; const int EDGE_VALUE_THRESHOLD = 1170; const int HIGH_BOOST_FACTOR = 10; #include "filter_kernel_5x5.cu" void BitMapRead(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char **data, unsigned char **palete) { size_t palete_size; int fd; if((fd = open(file, O_RDONLY )) < 0) FATAL("Open Source"); if(read(fd, bmp, BMP_SIZE) != BMP_SIZE) FATAL("Read BMP Header"); if(read(fd, dib, DIB_SIZE) != DIB_SIZE) FATAL("Read DIB Header"); assert(dib->bpp == 8); palete_size = bmp->offset - BMP_SIZE - DIB_SIZE; if(palete_size > 0) { *palete = (unsigned char *)malloc(palete_size); int go = read(fd, *palete, palete_size); if (go != palete_size) { FATAL("Read Palete"); } } *data = (unsigned char *)malloc(dib->image_size); if(read(fd, *data, dib->image_size) != dib->image_size) FATAL("Read Image"); close(fd); } void BitMapWrite(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char *data, unsigned char *palete) { size_t palete_size; int fd; palete_size = bmp->offset - BMP_SIZE - DIB_SIZE; if((fd = open(file, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR |S_IRGRP)) < 0) FATAL("Open Destination"); if(write(fd, bmp, BMP_SIZE) != BMP_SIZE) FATAL("Write BMP Header"); if(write(fd, dib, DIB_SIZE) != DIB_SIZE) FATAL("Write BMP Header"); if(palete_size != 0) { if(write(fd, palete, palete_size) != palete_size) FATAL("Write Palete"); } if(write(fd, data, dib->image_size) != dib->image_size) FATAL("Write Image"); close(fd); } void CPU_Sobel(unsigned char* imageIn, unsigned char* imageOut, int width, int height) { int i, j, rows, cols, startCol, endCol, startRow, endRow; const float SobelMatrix[25] = {1,2,0,-2,-1,4,8,0,-8,-4,6,12,0,-12,-6,4,8,0,-8,-4,1,2,0,-2,-1};; rows = height; cols = width; // Initialize all output pixels to zero for(i=0; i<rows; i++) { for(j=0; j<cols; j++) { imageOut[i*width + j] = 0; } } startCol = 1; endCol = cols - 1; startRow = 1; endRow = rows - 1; // Go through all inner pizel 
positions for(i=startRow; i<endRow; i++) { for(j=startCol; j<endCol; j++) { // sum up the 9 values to calculate both the direction x and direction y float sumX = 0, sumY=0; for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; dy++) { for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; dx++) { float Pixel = (float)(imageIn[i*width + j + (dy * width + dx)]); sumX += Pixel * SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER + (dx+FILTER_RADIUS)]; sumY += Pixel * SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER + (dy+FILTER_RADIUS)]; } } imageOut[i*width + j] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 255 : 0; } } } // Host code int main(int argc, char** argv) { ParseArguments(argc, argv); struct bmp_header bmp; struct dib_header dib; unsigned char *palete = NULL; unsigned char *data = NULL, *out = NULL; printf("Running %s filter\n", Filter); BitMapRead(BMPInFile, &bmp, &dib, &data, &palete); out = (unsigned char *)malloc(dib.image_size); printf("Computing the CPU output\n"); printf("Image details: %d by %d = %d , imagesize = %d\n", dib.width, dib.height, dib.width * dib.height,dib.image_size); unsigned int cpu_timer = 0; cutilCheckError(cutCreateTimer(&cpu_timer)); cutilCheckError(cutStartTimer(cpu_timer)); CPU_Sobel(data, out, dib.width, dib.height); cutilCheckError(cutStopTimer(cpu_timer)); BitMapWrite("CPU_sobel.bmp", &bmp, &dib, out, palete); printf("Done with CPU output\n"); printf("CPU time: %f (ms) \n", cutGetTimerValue(cpu_timer)); cutilCheckError(cutDeleteTimer(cpu_timer)); unsigned int sobel_mtimer = 0; // Initialize the timer to zero cycles. cutilCheckError(cutCreateTimer(&sobel_mtimer)); d_sobel = (float*)malloc(25*sizeof(float)); if (d_sobel == 0) Cleanup(); printf("Allocating %d bytes for image \n", dib.image_size); cutilSafeCall( cudaMalloc( (void **)&d_In, dib.image_size*sizeof(unsigned char)) ); cutilSafeCall( cudaMalloc( (void **)&d_Out, dib.image_size*sizeof(unsigned char)) ); cutilSafeCall( cudaMalloc( (void **)&d_sobel, 25*sizeof(float)) ); cutilCheckError(cutStartTimer(sobel_mtimer)); //Send image to host DRAM. cudaMemcpy(d_In, data, dib.image_size*sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy(d_sobel, sobel_matrix, 25*sizeof(float), cudaMemcpyHostToDevice); cutilCheckError(cutStopTimer(sobel_mtimer)); printf("1st Transfer %f (ms)\n", cutGetTimerValue(sobel_mtimer)); // Call Kernels FilterWrapper(data, dib.width, dib.height, d_sobel); cutilCheckError(cutStartTimer(sobel_mtimer)); // Copy image back to host cudaMemcpy(out, d_Out, dib.image_size*sizeof(unsigned char), cudaMemcpyDeviceToHost); cutilCheckError(cutStopTimer(sobel_mtimer)); printf("memory Transfer Time: %f (ms)\n", cutGetTimerValue(sobel_mtimer)); cutilCheckError(cutDeleteTimer(sobel_mtimer)); // Write output image BitMapWrite(BMPOutFile, &bmp, &dib, out, palete); Cleanup(); } void Cleanup(void) { cutilSafeCall( cudaThreadExit() ); exit(0); } void FilterWrapper(unsigned char* pImageIn, int Width, int Height, float* d_sobel) { // Design grid disection around tile size int gridWidth = (Width + TILE_WIDTH - 1) / TILE_WIDTH; int gridHeight = (Height + TILE_HEIGHT - 1) / TILE_HEIGHT; dim3 dimGrid(gridWidth, gridHeight); // But actually invoke larger blocks to take care of surrounding shared memory dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT); unsigned int sobel_timer = 0; // Initialize the timer to zero cycles. 
cutilCheckError(cutCreateTimer(&sobel_timer)); switch(FilterMode) { case SOBEL_FILTER: printf("Sobel Filter \n"); cutilCheckError(cutStartTimer(sobel_timer)); SobelFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height, d_sobel); cutilCheckMsg("kernel launch failure"); cutilCheckError(cutStopTimer(sobel_timer)); printf("GPU time: %f (ms) \n", cutGetTimerValue(sobel_timer)); break; case AVERAGE_FILTER: printf("Average Filter \n"); AverageFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height); cutilCheckMsg("kernel launch failure"); break; case HIGH_BOOST_FILTER: printf("Boost Filter \n"); HighBoostFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height, HIGH_BOOST_FACTOR ); cutilCheckMsg("kernel launch failure"); break; } cutilSafeCall( cudaThreadSynchronize() ); } // Parse program arguments void ParseArguments(int argc, char** argv) { for (int i = 0; i < argc; ++i) { if (strcmp(argv[i], "--file") == 0 || strcmp(argv[i], "-file") == 0) { BMPInFile = argv[i+1]; i = i + 1; } if (strcmp(argv[i], "--out") == 0 || strcmp(argv[i], "-out") == 0) { BMPOutFile = argv[i+1]; i = i + 1; } if (strcmp(argv[i], "--filter") == 0 || strcmp(argv[i], "-filter") == 0) { Filter = argv[i+1]; i = i + 1; if (strcmp(Filter, "sobel") == 0) FilterMode = SOBEL_FILTER; else if (strcmp(Filter, "average") == 0) FilterMode = AVERAGE_FILTER; else if (strcmp(Filter, "boost") == 0) FilterMode = HIGH_BOOST_FILTER; } } }
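// ---------------------------------------------------------------------------
// Reference sketch (not part of the pair above): this filter example is one of
// the few places where hipify does more than a 1:1 rename -- the deprecated
// cudaThreadSynchronize()/cudaThreadExit() calls in the .cu file become
// hipDeviceSynchronize()/hipDeviceReset() in the .hip file. The placeholder
// program below uses the equivalent modern CUDA calls.
// ---------------------------------------------------------------------------
#include <cstdio>

__global__ void noopKernel() {}

int main() {
  noopKernel<<<1, 32>>>();
  cudaDeviceSynchronize();  // modern replacement for cudaThreadSynchronize()
  cudaDeviceReset();        // modern replacement for cudaThreadExit()
  printf("done\n");
  return 0;
}
// ---------------------------------------------------------------------------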
7fe52188c25640dd6fa2b935589920f6b6f6742c.hip
// !!! This is a file automatically generated by hipify!!! #include<stdlib.h> #include<iostream> //#include<chrono> #include"rBRIEF.cuh" #include <fstream> #include <stdlib.h> /*=============*/ #define PRINTSTATS /*=============*/ int main(int argc, char const *argv[]) { int WPB = atoi(argv[1]); // // //CPU========================================================================= // // // 1) Initialized arguments // int numPatch = 10; // int patchDim = 10; // float* patchArray = (float*) malloc(sizeof(float) * numPatch * patchDim * patchDim); // bool* binVectorArray = (bool*) malloc(sizeof(bool) * numPatch * 256); // for (int i = 0; i < numPatch * patchDim * patchDim; i++) { // patchArray[i] = static_cast <float> (rand()) / static_cast <float> (255.0); // } // extern int cpu_precompute_BRIEF_pattern[256*4]; // int* pattern = cpu_precompute_BRIEF_pattern; // // // 2) Run cpu reference // auto t1 = std::chrono::high_resolution_clock::now(); // cpu_oBRIEF(numPatch, patchDim, patchArray, binVectorArray, pattern); // auto t2 = std::chrono::high_resolution_clock::now(); // auto duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count(); // // #ifdef PRINTSTATS // std::cout << "CPU reference: " << std::endl; // printMatrix<bool*>(binVectorArray, numPatch, 256); // std::cout << "CPU implementation takes: " << duration << " microseconds" <<std::endl; // #endif // // //GPU========================================================================= // // 3) Check GPU stats #ifdef rBRIEFDEBUG struct hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); std::cout << std::endl; std::cout << "GPU Name: " << prop.name << std::endl; std::cout << "Global Memory: " << prop.totalGlobalMem << " bytes" << std::endl; std::cout << "Shared Memory per SM: " << prop.sharedMemPerBlock << " bytes" << std::endl; std::cout << "Registers per SM: " << prop.regsPerBlock << std::endl; std::cout << "Warp Size: " << prop.warpSize << std::endl; std::cout << "Number of SM: " << prop.multiProcessorCount << std::endl; std::cout << std::endl; #endif // 4) GPU initialization, memory management int K = 96; // number of pixel per patch int P = 128; // number of patches in one image int I = 1000;// number of images in the array //int S = 32; // number of bits in one binary vector float4 * gpu_patches; float * raw_patches; int4* gpu_pattern; int4* train_bin_vec; int * gpu_output; raw_patches = (float *) malloc(sizeof(float) * K * P); hipMallocManaged(&gpu_patches, sizeof(float4) * (K / 4) * P * I); hipMallocManaged(&gpu_pattern, sizeof(int4) * 256); hipMallocManaged(&train_bin_vec, sizeof(int4) * (P/4)); hipMallocManaged(&gpu_output, sizeof(int) * P * I); std::fstream myfile("./141patches.txt", std::ios_base::in); float a; // 5) Get the values of the patches for (int pixel = 0; pixel < K * P; pixel++) { myfile >> a; raw_patches[pixel] = a; } for (int img = 0; img < I; img++) { for (int pixel= 0; pixel< (K * P) / 4; pixel++) { float x = raw_patches[pixel * 4 + 0]; float y = raw_patches[pixel * 4 + 1]; float z = raw_patches[pixel * 4 + 2]; float w = raw_patches[pixel * 4 + 3]; gpu_patches[img * (K * P) / 4 + pixel] = make_float4(x,y,z,w); } } // 6) Get the values of the pattern for (int i = 0; i < 256; i++) { int x = cpu_precompute_BRIEF_pattern[i*4 + 0]; int y = cpu_precompute_BRIEF_pattern[i*4 + 1]; int z = cpu_precompute_BRIEF_pattern[i*4 + 2]; int w = cpu_precompute_BRIEF_pattern[i*4 + 3]; gpu_pattern[i] = make_int4(x,y,z,w); } //7) Get the values of the trained binary vector for (int i = 0; i < 32; i++) { int 
x = cpu_precomputed_BRIEF_binvec[i*4 + 0]; int y = cpu_precomputed_BRIEF_binvec[i*4 + 1]; int z = cpu_precomputed_BRIEF_binvec[i*4 + 2]; int w = cpu_precomputed_BRIEF_binvec[i*4 + 3]; train_bin_vec[i] = make_int4(x, y, z, w); } // 8) Run gpu gpu_rBRIEF(gpu_patches, gpu_output, gpu_pattern, train_bin_vec, K, P, I, WPB); hipDeviceSynchronize(); #ifdef rBRIEFDEBUG for (int i = 0; i < 128; i++) printf("%d \n", gpu_output[i]); #endif }
7fe52188c25640dd6fa2b935589920f6b6f6742c.cu
#include<stdlib.h> #include<iostream> //#include<chrono> #include"rBRIEF.cuh" #include <fstream> #include <stdlib.h> /*=============*/ #define PRINTSTATS /*=============*/ int main(int argc, char const *argv[]) { int WPB = atoi(argv[1]); // // //CPU========================================================================= // // // 1) Initialized arguments // int numPatch = 10; // int patchDim = 10; // float* patchArray = (float*) malloc(sizeof(float) * numPatch * patchDim * patchDim); // bool* binVectorArray = (bool*) malloc(sizeof(bool) * numPatch * 256); // for (int i = 0; i < numPatch * patchDim * patchDim; i++) { // patchArray[i] = static_cast <float> (rand()) / static_cast <float> (255.0); // } // extern int cpu_precompute_BRIEF_pattern[256*4]; // int* pattern = cpu_precompute_BRIEF_pattern; // // // 2) Run cpu reference // auto t1 = std::chrono::high_resolution_clock::now(); // cpu_oBRIEF(numPatch, patchDim, patchArray, binVectorArray, pattern); // auto t2 = std::chrono::high_resolution_clock::now(); // auto duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count(); // // #ifdef PRINTSTATS // std::cout << "CPU reference: " << std::endl; // printMatrix<bool*>(binVectorArray, numPatch, 256); // std::cout << "CPU implementation takes: " << duration << " microseconds" <<std::endl; // #endif // // //GPU========================================================================= // // 3) Check GPU stats #ifdef rBRIEFDEBUG struct cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); std::cout << std::endl; std::cout << "GPU Name: " << prop.name << std::endl; std::cout << "Global Memory: " << prop.totalGlobalMem << " bytes" << std::endl; std::cout << "Shared Memory per SM: " << prop.sharedMemPerBlock << " bytes" << std::endl; std::cout << "Registers per SM: " << prop.regsPerBlock << std::endl; std::cout << "Warp Size: " << prop.warpSize << std::endl; std::cout << "Number of SM: " << prop.multiProcessorCount << std::endl; std::cout << std::endl; #endif // 4) GPU initialization, memory management int K = 96; // number of pixel per patch int P = 128; // number of patches in one image int I = 1000;// number of images in the array //int S = 32; // number of bits in one binary vector float4 * gpu_patches; float * raw_patches; int4* gpu_pattern; int4* train_bin_vec; int * gpu_output; raw_patches = (float *) malloc(sizeof(float) * K * P); cudaMallocManaged(&gpu_patches, sizeof(float4) * (K / 4) * P * I); cudaMallocManaged(&gpu_pattern, sizeof(int4) * 256); cudaMallocManaged(&train_bin_vec, sizeof(int4) * (P/4)); cudaMallocManaged(&gpu_output, sizeof(int) * P * I); std::fstream myfile("./141patches.txt", std::ios_base::in); float a; // 5) Get the values of the patches for (int pixel = 0; pixel < K * P; pixel++) { myfile >> a; raw_patches[pixel] = a; } for (int img = 0; img < I; img++) { for (int pixel= 0; pixel< (K * P) / 4; pixel++) { float x = raw_patches[pixel * 4 + 0]; float y = raw_patches[pixel * 4 + 1]; float z = raw_patches[pixel * 4 + 2]; float w = raw_patches[pixel * 4 + 3]; gpu_patches[img * (K * P) / 4 + pixel] = make_float4(x,y,z,w); } } // 6) Get the values of the pattern for (int i = 0; i < 256; i++) { int x = cpu_precompute_BRIEF_pattern[i*4 + 0]; int y = cpu_precompute_BRIEF_pattern[i*4 + 1]; int z = cpu_precompute_BRIEF_pattern[i*4 + 2]; int w = cpu_precompute_BRIEF_pattern[i*4 + 3]; gpu_pattern[i] = make_int4(x,y,z,w); } //7) Get the values of the trained binary vector for (int i = 0; i < 32; i++) { int x = cpu_precomputed_BRIEF_binvec[i*4 + 0]; int y = 
cpu_precomputed_BRIEF_binvec[i*4 + 1]; int z = cpu_precomputed_BRIEF_binvec[i*4 + 2]; int w = cpu_precomputed_BRIEF_binvec[i*4 + 3]; train_bin_vec[i] = make_int4(x, y, z, w); } // 8) Run gpu gpu_rBRIEF(gpu_patches, gpu_output, gpu_pattern, train_bin_vec, K, P, I, WPB); cudaDeviceSynchronize(); #ifdef rBRIEFDEBUG for (int i = 0; i < 128; i++) printf("%d \n", gpu_output[i]); #endif }
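// ---------------------------------------------------------------------------
// Reference sketch (not part of the pair above): the rBRIEF driver relies on a
// device-property query (cudaDeviceProp -> hipDeviceProp_t) and unified memory
// (cudaMallocManaged -> hipMallocManaged). The reduced example below exercises
// just those two pieces; the buffer size is illustrative, not the K/P/I layout
// used in the file.
// ---------------------------------------------------------------------------
#include <cstdio>

int main() {
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  printf("GPU: %s  SMs: %d  warp: %d\n", prop.name, prop.multiProcessorCount, prop.warpSize);

  // Managed (unified) memory is addressable from both host and device.
  float4* patches = nullptr;
  cudaMallocManaged(&patches, 1024 * sizeof(float4));
  patches[0] = make_float4(1.f, 2.f, 3.f, 4.f);  // host-side write, no explicit memcpy
  cudaDeviceSynchronize();
  printf("patches[0].x = %f\n", patches[0].x);
  cudaFree(patches);
  return 0;
}
// ---------------------------------------------------------------------------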
99d5632aebc2e551820e01a15ad36e3c6b4dddb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> // #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/upsample_layer.hpp" namespace caffe { template <typename Dtype> __global__ void UpsampleForward(const int nthreads, int in_w, int in_h, int out_w, int out_h, const Dtype* bottom_data, const Dtype* bottom_mask, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int offset = index / (in_w * in_h) * out_w * out_h; int upsample_idx = static_cast<int>(bottom_mask[index]); top_data[offset + upsample_idx] = bottom_data[index]; } } template <typename Dtype> void UpsampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_mask = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); caffe_gpu_set(top[0]->count(), Dtype(0), top_data); int bottom_count = bottom[0]->count(); hipLaunchKernelGGL(( UpsampleForward<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom_count, bottom[0]->width(), bottom[0]->height(), top[0]->width(), top[0]->height(), bottom_data, bottom_mask, top_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void UpsampleBackward(const int nthreads, int in_w, int in_h, int out_w, int out_h, const Dtype* top_diff, const Dtype* bottom_mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int offset = index / (in_w * in_h) * out_w * out_h; int upsample_idx = static_cast<int>(bottom_mask[index]); bottom_diff[index] = top_diff[offset + upsample_idx]; } } template <typename Dtype> void UpsampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_mask = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_count = bottom[0]->count(); caffe_gpu_set(bottom_count, Dtype(0.), bottom_diff); hipLaunchKernelGGL(( UpsampleBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom_count, bottom[0]->width(), bottom[0]->height(), top[0]->width(), top[0]->height(), top_diff, bottom_mask, bottom_diff); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(UpsampleLayer); } // namespace caffe
99d5632aebc2e551820e01a15ad36e3c6b4dddb5.cu
#include <algorithm> #include <cfloat> #include <vector> // #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/upsample_layer.hpp" namespace caffe { template <typename Dtype> __global__ void UpsampleForward(const int nthreads, int in_w, int in_h, int out_w, int out_h, const Dtype* bottom_data, const Dtype* bottom_mask, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int offset = index / (in_w * in_h) * out_w * out_h; int upsample_idx = static_cast<int>(bottom_mask[index]); top_data[offset + upsample_idx] = bottom_data[index]; } } template <typename Dtype> void UpsampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_mask = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); caffe_gpu_set(top[0]->count(), Dtype(0), top_data); int bottom_count = bottom[0]->count(); UpsampleForward<Dtype><<<CAFFE_GET_BLOCKS(bottom_count), CAFFE_CUDA_NUM_THREADS>>>( bottom_count, bottom[0]->width(), bottom[0]->height(), top[0]->width(), top[0]->height(), bottom_data, bottom_mask, top_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void UpsampleBackward(const int nthreads, int in_w, int in_h, int out_w, int out_h, const Dtype* top_diff, const Dtype* bottom_mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int offset = index / (in_w * in_h) * out_w * out_h; int upsample_idx = static_cast<int>(bottom_mask[index]); bottom_diff[index] = top_diff[offset + upsample_idx]; } } template <typename Dtype> void UpsampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_mask = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_count = bottom[0]->count(); caffe_gpu_set(bottom_count, Dtype(0.), bottom_diff); UpsampleBackward<Dtype><<<CAFFE_GET_BLOCKS(bottom_count), CAFFE_CUDA_NUM_THREADS>>>( bottom_count, bottom[0]->width(), bottom[0]->height(), top[0]->width(), top[0]->height(), top_diff, bottom_mask, bottom_diff); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(UpsampleLayer); } // namespace caffe
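// ---------------------------------------------------------------------------
// Reference sketch (not part of the pair above): the Caffe upsample layer shows
// the one genuinely syntactic rewrite hipify performs -- a triple-chevron launch
// Kernel<<<grid, block>>>(args) becomes
// hipLaunchKernelGGL(Kernel, dim3(grid), dim3(block), sharedMemBytes, stream, args),
// with shared memory and stream made explicit (both 0 above). The placeholder
// program below shows the same launch shape in plain CUDA.
// ---------------------------------------------------------------------------
#include <cstdio>

__global__ void scaleKernel(const int n, const float a, const float* x, float* y) {
  // Grid-stride loop, the same pattern as CUDA_KERNEL_LOOP in the Caffe file.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
    y[i] = a * x[i];
}

int main() {
  const int n = 1 << 16;
  float *x = nullptr, *y = nullptr;
  cudaMalloc(&x, n * sizeof(float));
  cudaMalloc(&y, n * sizeof(float));

  int blocks = (n + 255) / 256;
  // Hipified form: hipLaunchKernelGGL(scaleKernel, dim3(blocks), dim3(256), 0, 0, n, 2.0f, x, y);
  scaleKernel<<<blocks, 256>>>(n, 2.0f, x, y);
  cudaDeviceSynchronize();

  cudaFree(x);
  cudaFree(y);
  return 0;
}
// ---------------------------------------------------------------------------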
2004a74b387a096a38060a6f343a92096e32b6cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the doubleing point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in doubleing-point. extern "C" // Round to nearest integer value in doubleing-point. extern "C" // Calculate the reciprocal of the square root of the input argument. 
extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two doubleing point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the doubleing-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision doubleing-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision doubleing-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_cosh (int n, double *result, double *x) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; if (id < n) { result[id] = cosh(x[id]); } }
2004a74b387a096a38060a6f343a92096e32b6cb.cu
#include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the doubleing point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in doubleing-point. extern "C" // Round to nearest integer value in doubleing-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. 
extern "C" // Calculate the sine of the input argument × p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two doubleing point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the doubleing-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision doubleing-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision doubleing-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_cosh (int n, double *result, double *x) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; if (id < n) { result[id] = cosh(x[id]); } }
beafcabb6c7948e7a158402be4dc047ae52af25d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "bilateralFilterCudaKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float3 *dev_input = NULL; hipMalloc(&dev_input, XSIZE*YSIZE); float3 *dev_output = NULL; hipMalloc(&dev_output, XSIZE*YSIZE); float l2norm = 1; int width = XSIZE; int height = YSIZE; int radius = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( bilateralFilterCudaKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_input,dev_output,l2norm,width,height,radius); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( bilateralFilterCudaKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_input,dev_output,l2norm,width,height,radius); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( bilateralFilterCudaKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_input,dev_output,l2norm,width,height,radius); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
beafcabb6c7948e7a158402be4dc047ae52af25d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "bilateralFilterCudaKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float3 *dev_input = NULL; cudaMalloc(&dev_input, XSIZE*YSIZE); float3 *dev_output = NULL; cudaMalloc(&dev_output, XSIZE*YSIZE); float l2norm = 1; int width = XSIZE; int height = YSIZE; int radius = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); bilateralFilterCudaKernel<<<gridBlock,threadBlock>>>(dev_input,dev_output,l2norm,width,height,radius); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { bilateralFilterCudaKernel<<<gridBlock,threadBlock>>>(dev_input,dev_output,l2norm,width,height,radius); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { bilateralFilterCudaKernel<<<gridBlock,threadBlock>>>(dev_input,dev_output,l2norm,width,height,radius); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
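// ---------------------------------------------------------------------------
// Reference sketch (not part of the pair above): the bilateral-filter benchmark
// warms up with 10 launches and then times 1000 launches with
// std::chrono::steady_clock. Kernel launches are asynchronous, so without a
// device synchronization before the second clock read the interval can end
// before the kernels finish; the reduced harness below adds that final
// cudaDeviceSynchronize(). busyKernel and the sizes are placeholders.
// ---------------------------------------------------------------------------
#include <chrono>
#include <cstdio>

__global__ void busyKernel(float* out, int n) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) out[i] = sqrtf((float)i);
}

int main() {
  const int n = 1 << 20;
  float* out = nullptr;
  cudaMalloc(&out, n * sizeof(float));
  dim3 block(256), grid((n + 255) / 256);

  for (int i = 0; i < 10; ++i) busyKernel<<<grid, block>>>(out, n);  // warm-up
  cudaDeviceSynchronize();

  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < 1000; ++i) busyKernel<<<grid, block>>>(out, n);
  cudaDeviceSynchronize();  // include kernel execution, not just launch overhead
  auto end = std::chrono::steady_clock::now();

  double usecs = std::chrono::duration<double, std::micro>(end - start).count();
  printf("average per launch: %.2f us\n", usecs / 1000.0);

  cudaFree(out);
  return 0;
}
// ---------------------------------------------------------------------------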
a84304827c9376edb9a22e6adf49958303bd039e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> #include <stdlib.h> #include <hip/hip_complex.h> #include "interpolate.hh" #include "Globals.h" #include "kernel.hh" #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 #ifdef ZERO_BANK_CONFLICTS #define CONFLICT_FREE_OFFSET(n) \ ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS)) #else #define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS) #endif __device__ double d_d_dtdm(double v,double e,double Y,double q){ double v2=v*v; double v3=v2*v; double v4=v2*v2; double e2=e*e; double e4=e2*e2; double Y2=Y*Y; double q2=q*q; double eq=(2.*e4*(240. + v2*(-120. + v*(42.*(-27. + 4.*q2)*v + (-6567. + 1996.*q2)*v3 + 48.*q*(8. + 77.*v2)*Y - 4.*q2*v*(90. + 1577.*v2)*Y2))) + e4*e2*(560. + v2*(-360. + 960.*q*v*Y + 8816.*q*v3*Y + v4*(-15565. + 24.*q2*(200. - 629.*Y2)) + v2*(-2742. + 80.*q2*(5. - 11.*Y2)))) - 8.*e2*(-48. + v2*(8. - 64.*q*v*Y - 688.*q*v3*Y + 2.*v2*(99. + 16.*q2*(-1. + 2.*Y2)) + v4*(1233. + 8.*q2*(-47. + 150.*Y2)))) + 16.*(16. + v2*(24. + v2*(27.*(2. + 5.*v2) - 48.*q*v*Y + 4.*q2*(2. - v2 + (-2. + 3.*v2)*Y2)))))/(256.*v4); return eq; } __device__ double d_d_dphidm(double v,double e,double Y,double q){ double v2=v*v; double v3=v2*v; double e2=e*e; double e4=e2*e2; double Y2=Y*Y; double q2=q*q; double eq=(16. + 8.*(3. + e2)*v2 - 16.*q*v3*(-2. + (3. + e2)*Y) - 8.*q*v3*v2*(-6. + 15.*Y + 3.*e4*Y + 2.*e2*(-4. + 7.*Y)) + 2.*v2*v2*(27. + 3.*e4 + 2.*q2*(-1. + Y)*(1. + 7.*Y) + 2.*e2*(9. + q2*(1. + Y2))) + v3*v3*(5.*e4*e2 + e4*(45. + q2*(2. + 26.*Y2)) + e2*(135. + 4.*q2*(-19. + 5.*Y*(-7. + 9.*Y))) + 3.*(45. + 2.*q2*(-9. + Y*(-6. + 19.*Y)))))/(16.*v); return eq; } // ----- magnitude of azimuthal angular frequency for prograde/retrograde orbits ----- __device__ double d_OmegaPhi(double v, double e, double cosiota, double s, double M){ double omegaphi; if(cosiota>0) omegaphi=d_d_dphidm(v,e,cosiota,s)/d_d_dtdm(v,e,cosiota,s)/M; else omegaphi=d_d_dphidm(v,e,-cosiota,-s)/d_d_dtdm(v,e,-cosiota,-s)/M; return omegaphi; } __device__ void d_cross(const double *u,const double *v,double *w){ w[0] = u[1]*v[2]-u[2]*v[1]; w[1] = u[2]*v[0]-u[0]*v[2]; w[2] = u[0]*v[1]-u[1]*v[0]; } __device__ double d_dot_product(const double *u,const double *v){ return u[0]*v[0] + u[1]*v[1] + u[2]*v[2]; } __device__ double d_vec_norm(const double *u){ return sqrt(u[0]*u[0] + u[1]*u[1] + u[2]*u[2]); } __device__ void d_RotCoeff(double rot[],double iota,double theta_S,double phi_S,double theta_K,double phi_K,double alpha){ double n[3]; double L[3]; double S[3]; double nxL[3]; double nxS[3]; n[0] = sin(theta_S)*cos(phi_S); n[1] = sin(theta_S)*sin(phi_S); n[2] = cos(theta_S); S[0] = sin(theta_K)*cos(phi_K); S[1] = sin(theta_K)*sin(phi_K); S[2] = cos(theta_K); L[0] = cos(iota)*sin(theta_K)*cos(phi_K)+sin(iota)*(sin(alpha)*sin(phi_K)-cos(alpha)*cos(theta_K)*cos(phi_K)); L[1] = cos(iota)*sin(theta_K)*sin(phi_K)-sin(iota)*(sin(alpha)*cos(phi_K)+cos(alpha)*cos(theta_K)*sin(phi_K)); L[2] = cos(iota)*cos(theta_K)+sin(iota)*cos(alpha)*sin(theta_K); d_cross(n,L,nxL); d_cross(n,S,nxS); double norm=d_vec_norm(nxL)*d_vec_norm(nxS); double dot,cosrot,sinrot; //gsl_blas_ddot(nxL,nxS,&dot); dot = d_dot_product(nxL,nxS); cosrot=dot/norm; //gsl_blas_ddot(L,nxS,&dot); dot = d_dot_product(L,nxS); sinrot=dot; //gsl_blas_ddot(S,nxL,&dot); dot = d_dot_product(S,nxL); sinrot-=dot; sinrot/=norm; rot[0]=2.*cosrot*cosrot-1.; rot[1]=cosrot*sinrot; rot[2]=-rot[1]; rot[3]=rot[0]; } __device__ void find_index_and_xout(int *index, double *x_out, double 
*x_out2, double *x_out3, double dx, double x_new, double *x_old, int length){ double x_trans; *index = (int)floor(x_new/dx); // assumes first time is zero if (*index >= length - 1) *index = length - 2; x_trans = (x_new - x_old[*index]); *x_out = x_trans; *x_out2 = x_trans*x_trans; *x_out3 = x_trans*x_trans*x_trans; //printf("interp %d, %e %e, %e, %e, %e, %e\n", *index, dx, x_old[0], x_new, x_old[*index], x_old[*index+1], x_trans); /*# if __CUDA_ARCH__>=200 if (x_new == 1.000100e+06) printf("interp %d, %e %e, %e, %e, %e, %e\n", *index, dx, x_old[0], x_new, x_old[*index], x_old[*index+1], x_trans); #endif //*/ } __device__ double interpolate_array(InterpArrayContainer array_container, double x, double x2, double x3, int index, double x_new){ double coeff_0 = array_container.array[index]; double coeff_1 = array_container.coeff_1[index]; double coeff_2 = array_container.coeff_2[index]; double coeff_3 = array_container.coeff_3[index]; double return_val = coeff_0 + coeff_1*x + coeff_2*x2 + coeff_3*x3; // printf("interp2 %d, %e, %e %e, %e, %e, %.18e, %.18e, %.18e, %.18e\n", index, return_val, x, x2, x3, x_new, coeff_0, coeff_1, coeff_2, coeff_3); /*# if __CUDA_ARCH__>=200 if ((x_new <= 100.0)) printf("interp2 %d, %e %e, %e, %e, %.18e, %.18e, %.18e, %.18e\n", index, x, x2, x3, x_new, coeff_0, coeff_1, coeff_2, coeff_3); #endif //*/ return return_val; } __device__ double d_dtdm(double v,double e,double Y,double q){ double v2=v*v; double v3=v2*v; double v4=v2*v2; double e2=e*e; double e4=e2*e2; double Y2=Y*Y; double q2=q*q; double eq=(2.*e4*(240. + v2*(-120. + v*(42.*(-27. + 4.*q2)*v + (-6567. + 1996.*q2)*v3 + 48.*q*(8. + 77.*v2)*Y - 4.*q2*v*(90. + 1577.*v2)*Y2))) + e4*e2*(560. + v2*(-360. + 960.*q*v*Y + 8816.*q*v3*Y + v4*(-15565. + 24.*q2*(200. - 629.*Y2)) + v2*(-2742. + 80.*q2*(5. - 11.*Y2)))) - 8.*e2*(-48. + v2*(8. - 64.*q*v*Y - 688.*q*v3*Y + 2.*v2*(99. + 16.*q2*(-1. + 2.*Y2)) + v4*(1233. + 8.*q2*(-47. + 150.*Y2)))) + 16.*(16. + v2*(24. + v2*(27.*(2. + 5.*v2) - 48.*q*v*Y + 4.*q2*(2. - v2 + (-2. + 3.*v2)*Y2)))))/(256.*v4); return eq; } __device__ double d_drdm(double v,double e,double Y,double q){ double v2=v*v; double v3=v2*v; double e2=e*e; double e4=e2*e2; double Y2=Y*Y; double q2=q*q; double eq=(16. + 8.*(-3. + e2)*v2 - 16.*(-3. + e2)*q*v3*Y + 8.*(33. + 4.*e2 - 3.*e4)*q*v3*v2*Y + v3*v3*(-351. + 132.*q2 + e2*(-135. + 21.*e2 + 5.*e4 + 2.*(7. + e2)*q2) + 2.*(-204. + 13.*e2*(-3. + e2))*q2*Y2) + 2.*v2*v2*(-45. + 3.*e4 + 4.*q2*(1. - 4.*Y2) + 2.*e2*q2*(1. + Y2)))/(16.*v); return eq; } __device__ double d_dthetadm(double v,double e,double Y,double q){ double v2=v*v; double v3=v2*v; double e2=e*e; double e4=e2*e2; double Y2=Y*Y; double q2=q*q; double eq=(16. + 8.*(3. + e2)*v2 - 16.*(3. + e2)*q*v3*Y - 8.*(3. + e2)*(5. + 3.*e2)*q*v3*v2*Y + v3*v3*(135. - 54.*q2 + e2*(5.*(27. + 9.*e2 + e4) + 2.*(-38. + e2)*q2) + 2.*(57. + 90.*e2 + 13.*e4)*q2*Y2) + 2.*v2*v2*(27. + 3.*e4 + 2.*q2*(-1. + 7.*Y2) + 2.*e2*(9. + q2*(1. + Y2))))/(16.*v); return eq; } __device__ double d_dphidm(double v,double e,double Y,double q){ double v2=v*v; double v3=v2*v; double e2=e*e; double e4=e2*e2; double Y2=Y*Y; double q2=q*q; double eq=(16. + 8.*(3. + e2)*v2 - 16.*q*v3*(-2. + (3. + e2)*Y) - 8.*q*v3*v2*(-6. + 15.*Y + 3.*e4*Y + 2.*e2*(-4. + 7.*Y)) + 2.*v2*v2*(27. + 3.*e4 + 2.*q2*(-1. + Y)*(1. + 7.*Y) + 2.*e2*(9. + q2*(1. + Y2))) + v3*v3*(5.*e4*e2 + e4*(45. + q2*(2. + 26.*Y2)) + e2*(135. + 4.*q2*(-19. + 5.*Y*(-7. + 9.*Y))) + 3.*(45. + 2.*q2*(-9. + Y*(-6. 
+ 19.*Y)))))/(16.*v); return eq; } __global__ void produce_phasing(double *e_out, double *v_out, double *M_out, double *S_out, double *gimdot_out, double *nu_out, double *alpdot_out, double *gim_out, double *Phi_out, double *alp_out, double *tvec, InterpArrayContainer evec, InterpArrayContainer vvec, InterpArrayContainer Mvec, InterpArrayContainer Svec, double lam, int init_length, double init_dt, double timestep, double t_clip, int run_length) { int index; double x, x2, x3; int i = blockIdx.x * blockDim.x + threadIdx.x; double t = timestep*i; if (t > t_clip) return; double tm = timestep*(i-1); if (i==0) tm = t; double coslam=cos(lam); double sinlam=sin(lam); find_index_and_xout(&index, &x, &x2, &x3, init_dt, tm, tvec, init_length); double e=interpolate_array(evec, x, x2, x3, index, timestep*i); //evec.array[i]; double v=interpolate_array(vvec, x, x2, x3, index, timestep*i); //vvec.array[i]; double M=interpolate_array(Mvec, x, x2, x3, index, timestep*i); //Mvec.array[i]; double S=interpolate_array(Svec, x, x2, x3, index, timestep*i); //Svec.array[i]; double gimdotm=(d_dthetadm(v,e,coslam,S)-d_drdm(v,e,coslam,S))/d_dtdm(v,e,coslam,S)/M; double Phidotm=d_drdm(v,e,coslam,S)/d_dtdm(v,e,coslam,S)/M; double alpdotm=(d_dphidm(v,e,coslam,S)-d_dthetadm(v,e,coslam,S))/d_dtdm(v,e,coslam,S)/M; find_index_and_xout(&index, &x, &x2, &x3, init_dt, t, tvec, init_length); e=interpolate_array(evec, x, x2, x3, index, timestep*i); //evec.array[i]; v=interpolate_array(vvec, x, x2, x3, index, timestep*i); //vvec.array[i]; M=interpolate_array(Mvec, x, x2, x3, index, timestep*i); //Mvec.array[i]; S=interpolate_array(Svec, x, x2, x3, index, timestep*i); //Svec.array[i]; double gimdot=(d_dthetadm(v,e,coslam,S)-d_drdm(v,e,coslam,S))/d_dtdm(v,e,coslam,S)/M; double Phidot=d_drdm(v,e,coslam,S)/d_dtdm(v,e,coslam,S)/M; double alpdot=(d_dphidm(v,e,coslam,S)-d_dthetadm(v,e,coslam,S))/d_dtdm(v,e,coslam,S)/M; //double nu=Phidot/2./M_PI; e_out[i] = e; v_out[i] = v; M_out[i] = M; S_out[i] = S; gimdot_out[i] = gimdot; nu_out[i] = Phidot/2./M_PI;; alpdot_out[i] = alpdot; if (i >= run_length-1) return; gim_out[i+1] = (1.5*gimdot-.5*gimdotm)*timestep; Phi_out[i+1] = (1.5*Phidot-.5*Phidotm)*timestep; alp_out[i+1] = (1.5*alpdot-.5*alpdotm)*timestep; //double nu=Phidot/2./M_PI; } __global__ void prescan0(double *arr, double arr0, double *temp) { *arr = arr0; *temp = 0.0; } __global__ void prescan1(double *g_idata, int n, double *temp, int num_sum_per_thread) { int start_ind = (blockIdx.x*blockDim.x + threadIdx.x)*num_sum_per_thread; if (start_ind >= n) return; int end_ind = start_ind + num_sum_per_thread - 1; if (end_ind >= n) end_ind = n-1; for(int i=start_ind; i<end_ind; i++) g_idata[i+1]+=g_idata[i]; int temp_ind = start_ind / num_sum_per_thread + 1; temp[temp_ind] = g_idata[end_ind]; } __global__ void prescan2(int n, double *temp, int num_sum_per_thread) { for(int i=0; i<n-1; i++) temp[i+1] += temp[i]; } __global__ void prescan3(double *g_idata, int n, double *temp, int num_sum_per_thread) { for(int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += blockDim.x*gridDim.x){ int temp_ind = i / num_sum_per_thread; g_idata[i] += temp[temp_ind]; } } #define gpuErrchk_kern(ans) { gpuAssert_kern((ans), __FILE__, __LINE__); } inline void gpuAssert_kern(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } void cumsum(double *data, double phase0, int n){ int num_sum_per_thread = 256; int NUM_THREADS = 
256; int num_needed_threads = ::ceil(n/num_sum_per_thread); int num_blocks_prescan1 = ::ceil((num_needed_threads + 1 + NUM_THREADS -1)/NUM_THREADS); double *temp; gpuErrchk_kern(hipMalloc(&temp, (num_needed_threads+1)*sizeof(double))); hipLaunchKernelGGL(( prescan0), dim3(1),dim3(1), 0, 0, data, phase0, temp); hipDeviceSynchronize(); gpuErrchk_kern(hipGetLastError()); hipLaunchKernelGGL(( prescan1), dim3(num_blocks_prescan1), dim3(NUM_THREADS), 0, 0, data, n, temp, num_sum_per_thread); hipDeviceSynchronize(); gpuErrchk_kern(hipGetLastError()); hipLaunchKernelGGL(( prescan2), dim3(1),dim3(1), 0, 0, num_needed_threads+1, temp, num_sum_per_thread); hipDeviceSynchronize(); gpuErrchk_kern(hipGetLastError()); int num_blocks_prescan3 = ::ceil((n + 1 + NUM_THREADS -1)/NUM_THREADS); hipLaunchKernelGGL(( prescan3), dim3(num_blocks_prescan3), dim3(NUM_THREADS), 0, 0, data, n, temp, num_sum_per_thread); hipDeviceSynchronize(); gpuErrchk_kern(hipGetLastError()); gpuErrchk_kern(hipFree(temp)); } __global__ void kernel_create_waveform(double *t, double *hI, double *hII, double *tvec, double *evec, double *vvec, double *Mvec, double *Svec, double *gimvec, double *Phivec, double *alpvec, double *nuvec, double *gimdotvec, double lam, double qS, double phiS, double qK, double phiK, bool mich, int init_length, int vlength,int nmodes, int i_plunge, int i_buffer, double zeta, double M_phys, double init_dt, double timestep, int run_length){ int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= vlength) return; if (i >= run_length) { hI[i] = 0.0; hII[i] = 0.0; return; } // ---------- // TODO: calculate this first section before gpu double coslam=cos(lam); double sinlam=sin(lam); double cosqS=cos(qS); double sinqS=sin(qS); double cosqK=cos(qK); double sinqK=sin(qK); double cosphiK=cos(phiK); double sinphiK=sin(phiK); double halfsqrt3=sqrt(3.)/2.; // ----- compute waveform from t_start to t_end ----- //for(int i=0;i<vlength;i++){ double time = timestep*i; t[i]= time; double t_plunge=i_plunge*timestep; double t_zero=t_plunge+timestep*i_buffer; if (time <=t_zero){ hI[i]=0.; hII[i]=0.; double e=evec[i]; //evec.array[i]; double v=vvec[i]; //vvec.array[i]; double M=Mvec[i]; //Mvec.array[i]; double S=Svec[i]; //Svec.array[i]; double gim=gimvec[i]; //gimvec.array[i]; double Phi=Phivec[i]; //Phivec.array[i]; double alp=alpvec[i]; //alpvec.array[i]; double nu=nuvec[i]; //nuvec.array[i]; double gimdot=gimdotvec[i]; //gimdotvec.array[i]; /*# if __CUDA_ARCH__>=200 //if ((index >= 12000) && (index <= 12100)) //printf("%d, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e\n", i, e, t[i], tvec[index], tvec[index+1], evec.array[index], evec.coeff_1[index], evec.coeff_2[index], evec.coeff_3[index], x, x2, x3); printf("%d, %.18e, %.18e, %.18e, %.18e\n", i, t[i], e, nu, gimdot); #endif //*/ double cosalp=cos(alp); double sinalp=sin(alp); double cosqL=cosqK*coslam+sinqK*sinlam*cosalp; double sinqL=sqrt(1.-cosqL*cosqL); double phiLup=sinqK*sinphiK*coslam-cosphiK*sinlam*sinalp-cosqK*sinphiK*sinlam*cosalp; double phiLdown=sinqK*cosphiK*coslam+sinphiK*sinlam*sinalp-cosqK*cosphiK*sinlam*cosalp; double phiL=atan2(phiLup,phiLdown); double Ldotn=cosqL*cosqS+sinqL*sinqS*cos(phiL-phiS); double Ldotn2=Ldotn*Ldotn; double Sdotn=cosqK*cosqS+sinqK*sinqS*cos(phiK-phiS); double betaup=-Sdotn+coslam*Ldotn; double betadown=sinqS*sin(phiK-phiS)*sinlam*cosalp+(cosqK*Sdotn-cosqS)/sinqK*sinlam*sinalp; double beta=atan2(betaup,betadown); double gam=2.*(gim+beta); double cos2gam=cos(gam); double sin2gam=sin(gam); double 
orbphs,cosorbphs,sinorbphs,FplusI,FcrosI,FplusII,FcrosII; if(mich){ orbphs=2.*M_PI*t[i]/year; cosorbphs=cos(orbphs-phiS); sinorbphs=sin(orbphs-phiS); double cosq=.5*cosqS-halfsqrt3*sinqS*cosorbphs; double phiw=orbphs+atan2(halfsqrt3*cosqS+.5*sinqS*cosorbphs,sinqS*sinorbphs); double psiup=.5*cosqK-halfsqrt3*sinqK*cos(orbphs-phiK)-cosq*(cosqK*cosqS+sinqK*sinqS*cos(phiK-phiS)); double psidown=.5*sinqK*sinqS*sin(phiK-phiS)-halfsqrt3*cos(orbphs)*(cosqK*sinqS*sin(phiS)-cosqS*sinqK*sin(phiK))-halfsqrt3*sin(orbphs)*(cosqS*sinqK*cos(phiK)-cosqK*sinqS*cos(phiS)); double psi=atan2(psiup,psidown); double cosq1=.5*(1.+cosq*cosq); double cos2phi=cos(2.*phiw); double sin2phi=sin(2.*phiw); double cos2psi=cos(2.*psi); double sin2psi=sin(2.*psi); FplusI=cosq1*cos2phi*cos2psi-cosq*sin2phi*sin2psi; FcrosI=cosq1*cos2phi*sin2psi+cosq*sin2phi*cos2psi; FplusII=cosq1*sin2phi*cos2psi+cosq*cos2phi*sin2psi; FcrosII=cosq1*sin2phi*sin2psi-cosq*cos2phi*cos2psi; } else{ FplusI=1.; FcrosI=0.; FplusII=0.; FcrosII=1.; } double Amp=pow(d_OmegaPhi(v,e,coslam,S,M)*M_phys*SOLARMASSINSEC,2./3.)*zeta; // TODO: check making num modes to gridDim (then need to do reduction to get singular waveform) double fn,Doppler,nPhi; double ne, a, b, c, Aplus, Acros, Aplusold, Acrosold; double rot[4], J[5]; for(int n=1;n<=nmodes;n++){ if(mich){ fn=n*nu+gimdot/M_PI; Doppler=2.*M_PI*fn*AUsec*sinqS*cosorbphs; nPhi=n*Phi+Doppler; } else nPhi=n*Phi; ne=n*e; if(n==1){ J[0]=-1.0*j1(ne); J[1]=j0(ne); J[2]=j1(ne); J[3]=jn(2,ne); J[4]=jn(3,ne); } else{ J[0]=jn(n-2, ne); J[1]=jn(n-1, ne); J[2]=jn(n, ne); J[3]=jn(n+1,ne); J[4]=jn(n+2,ne); } a=-n*Amp*(J[0]-2.*e*J[1]+2./n*J[2]+2.*e*J[3]-J[4])*cos(nPhi); b=-n*Amp*sqrt(1-e*e)*(J[0]-2.*J[2]+J[4])*sin(nPhi); c=2.*Amp*J[2]*cos(nPhi); Aplus=-(1.+Ldotn2)*(a*cos2gam-b*sin2gam)+c*(1-Ldotn2); Acros=2.*Ldotn*(b*cos2gam+a*sin2gam); // ----- rotate to NK wave frame ----- Aplusold=Aplus; Acrosold=Acros; d_RotCoeff(rot,lam,qS,phiS,qK,phiK,alp); Aplus=Aplusold*rot[0]+Acrosold*rot[1]; Acros=Aplusold*rot[2]+Acrosold*rot[3]; // ---------- double hnI,hnII; if(mich){ hnI=halfsqrt3*(FplusI*Aplus+FcrosI*Acros); hnII=halfsqrt3*(FplusII*Aplus+FcrosII*Acros); } else{ hnI=FplusI*Aplus+FcrosI*Acros; hnII=FplusII*Aplus+FcrosII*Acros; } hI[i]+=hnI; hII[i]+=hnII; } } if ((time>t_plunge) &&(i<vlength)){ if(time<t_zero){ hI[i]=hI[i]/(exp((t_plunge-t_zero)/(t[i]-t_plunge)+(t_plunge-t_zero)/(t[i]-t_zero))+1.); hII[i]=hII[i]/(exp((t_plunge-t_zero)/(t[i]-t_plunge)+(t_plunge-t_zero)/(t[i]-t_zero))+1.); } else{ hI[i]=0.; hII[i]=0.; } // ---------- } /*# if __CUDA_ARCH__>=200 if (i == 1000) printf("%d, %.18e, %.18e\n", i, hI[i], hII[i]); #endif //*/ } __global__ void likelihood_prep(hipDoubleComplex *template_channel1, hipDoubleComplex *template_channel2, double *noise_channel1_inv, double *noise_channel2_inv, int length){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i >= length) return; /*# if __CUDA_ARCH__>=200 if (i == 1000) printf("%d, %.18e, %.18e, %.18e\n", i, cuCreal(template_channel1[i]), cuCimag(template_channel2[i]), noise_channel1_inv[i]); #endif //*/ template_channel1[i] = cuCmul(template_channel1[i], make_cuDoubleComplex(noise_channel1_inv[i], 0.0)); template_channel2[i] = cuCmul(template_channel2[i], make_cuDoubleComplex(noise_channel2_inv[i], 0.0)); }
a84304827c9376edb9a22e6adf49958303bd039e.cu
#include <stdio.h> #include <math.h> #include <stdlib.h> #include <cuComplex.h> #include "interpolate.hh" #include "Globals.h" #include "kernel.hh" #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 #ifdef ZERO_BANK_CONFLICTS #define CONFLICT_FREE_OFFSET(n) \ ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS)) #else #define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS) #endif __device__ double d_d_dtdm(double v,double e,double Y,double q){ double v2=v*v; double v3=v2*v; double v4=v2*v2; double e2=e*e; double e4=e2*e2; double Y2=Y*Y; double q2=q*q; double eq=(2.*e4*(240. + v2*(-120. + v*(42.*(-27. + 4.*q2)*v + (-6567. + 1996.*q2)*v3 + 48.*q*(8. + 77.*v2)*Y - 4.*q2*v*(90. + 1577.*v2)*Y2))) + e4*e2*(560. + v2*(-360. + 960.*q*v*Y + 8816.*q*v3*Y + v4*(-15565. + 24.*q2*(200. - 629.*Y2)) + v2*(-2742. + 80.*q2*(5. - 11.*Y2)))) - 8.*e2*(-48. + v2*(8. - 64.*q*v*Y - 688.*q*v3*Y + 2.*v2*(99. + 16.*q2*(-1. + 2.*Y2)) + v4*(1233. + 8.*q2*(-47. + 150.*Y2)))) + 16.*(16. + v2*(24. + v2*(27.*(2. + 5.*v2) - 48.*q*v*Y + 4.*q2*(2. - v2 + (-2. + 3.*v2)*Y2)))))/(256.*v4); return eq; } __device__ double d_d_dphidm(double v,double e,double Y,double q){ double v2=v*v; double v3=v2*v; double e2=e*e; double e4=e2*e2; double Y2=Y*Y; double q2=q*q; double eq=(16. + 8.*(3. + e2)*v2 - 16.*q*v3*(-2. + (3. + e2)*Y) - 8.*q*v3*v2*(-6. + 15.*Y + 3.*e4*Y + 2.*e2*(-4. + 7.*Y)) + 2.*v2*v2*(27. + 3.*e4 + 2.*q2*(-1. + Y)*(1. + 7.*Y) + 2.*e2*(9. + q2*(1. + Y2))) + v3*v3*(5.*e4*e2 + e4*(45. + q2*(2. + 26.*Y2)) + e2*(135. + 4.*q2*(-19. + 5.*Y*(-7. + 9.*Y))) + 3.*(45. + 2.*q2*(-9. + Y*(-6. + 19.*Y)))))/(16.*v); return eq; } // ----- magnitude of azimuthal angular frequency for prograde/retrograde orbits ----- __device__ double d_OmegaPhi(double v, double e, double cosiota, double s, double M){ double omegaphi; if(cosiota>0) omegaphi=d_d_dphidm(v,e,cosiota,s)/d_d_dtdm(v,e,cosiota,s)/M; else omegaphi=d_d_dphidm(v,e,-cosiota,-s)/d_d_dtdm(v,e,-cosiota,-s)/M; return omegaphi; } __device__ void d_cross(const double *u,const double *v,double *w){ w[0] = u[1]*v[2]-u[2]*v[1]; w[1] = u[2]*v[0]-u[0]*v[2]; w[2] = u[0]*v[1]-u[1]*v[0]; } __device__ double d_dot_product(const double *u,const double *v){ return u[0]*v[0] + u[1]*v[1] + u[2]*v[2]; } __device__ double d_vec_norm(const double *u){ return sqrt(u[0]*u[0] + u[1]*u[1] + u[2]*u[2]); } __device__ void d_RotCoeff(double rot[],double iota,double theta_S,double phi_S,double theta_K,double phi_K,double alpha){ double n[3]; double L[3]; double S[3]; double nxL[3]; double nxS[3]; n[0] = sin(theta_S)*cos(phi_S); n[1] = sin(theta_S)*sin(phi_S); n[2] = cos(theta_S); S[0] = sin(theta_K)*cos(phi_K); S[1] = sin(theta_K)*sin(phi_K); S[2] = cos(theta_K); L[0] = cos(iota)*sin(theta_K)*cos(phi_K)+sin(iota)*(sin(alpha)*sin(phi_K)-cos(alpha)*cos(theta_K)*cos(phi_K)); L[1] = cos(iota)*sin(theta_K)*sin(phi_K)-sin(iota)*(sin(alpha)*cos(phi_K)+cos(alpha)*cos(theta_K)*sin(phi_K)); L[2] = cos(iota)*cos(theta_K)+sin(iota)*cos(alpha)*sin(theta_K); d_cross(n,L,nxL); d_cross(n,S,nxS); double norm=d_vec_norm(nxL)*d_vec_norm(nxS); double dot,cosrot,sinrot; //gsl_blas_ddot(nxL,nxS,&dot); dot = d_dot_product(nxL,nxS); cosrot=dot/norm; //gsl_blas_ddot(L,nxS,&dot); dot = d_dot_product(L,nxS); sinrot=dot; //gsl_blas_ddot(S,nxL,&dot); dot = d_dot_product(S,nxL); sinrot-=dot; sinrot/=norm; rot[0]=2.*cosrot*cosrot-1.; rot[1]=cosrot*sinrot; rot[2]=-rot[1]; rot[3]=rot[0]; } __device__ void find_index_and_xout(int *index, double *x_out, double *x_out2, double *x_out3, double dx, double x_new, double *x_old, int length){ double x_trans; *index 
= (int)floor(x_new/dx); // assumes first time is zero if (*index >= length - 1) *index = length - 2; x_trans = (x_new - x_old[*index]); *x_out = x_trans; *x_out2 = x_trans*x_trans; *x_out3 = x_trans*x_trans*x_trans; //printf("interp %d, %e %e, %e, %e, %e, %e\n", *index, dx, x_old[0], x_new, x_old[*index], x_old[*index+1], x_trans); /*# if __CUDA_ARCH__>=200 if (x_new == 1.000100e+06) printf("interp %d, %e %e, %e, %e, %e, %e\n", *index, dx, x_old[0], x_new, x_old[*index], x_old[*index+1], x_trans); #endif //*/ } __device__ double interpolate_array(InterpArrayContainer array_container, double x, double x2, double x3, int index, double x_new){ double coeff_0 = array_container.array[index]; double coeff_1 = array_container.coeff_1[index]; double coeff_2 = array_container.coeff_2[index]; double coeff_3 = array_container.coeff_3[index]; double return_val = coeff_0 + coeff_1*x + coeff_2*x2 + coeff_3*x3; // printf("interp2 %d, %e, %e %e, %e, %e, %.18e, %.18e, %.18e, %.18e\n", index, return_val, x, x2, x3, x_new, coeff_0, coeff_1, coeff_2, coeff_3); /*# if __CUDA_ARCH__>=200 if ((x_new <= 100.0)) printf("interp2 %d, %e %e, %e, %e, %.18e, %.18e, %.18e, %.18e\n", index, x, x2, x3, x_new, coeff_0, coeff_1, coeff_2, coeff_3); #endif //*/ return return_val; } __device__ double d_dtdm(double v,double e,double Y,double q){ double v2=v*v; double v3=v2*v; double v4=v2*v2; double e2=e*e; double e4=e2*e2; double Y2=Y*Y; double q2=q*q; double eq=(2.*e4*(240. + v2*(-120. + v*(42.*(-27. + 4.*q2)*v + (-6567. + 1996.*q2)*v3 + 48.*q*(8. + 77.*v2)*Y - 4.*q2*v*(90. + 1577.*v2)*Y2))) + e4*e2*(560. + v2*(-360. + 960.*q*v*Y + 8816.*q*v3*Y + v4*(-15565. + 24.*q2*(200. - 629.*Y2)) + v2*(-2742. + 80.*q2*(5. - 11.*Y2)))) - 8.*e2*(-48. + v2*(8. - 64.*q*v*Y - 688.*q*v3*Y + 2.*v2*(99. + 16.*q2*(-1. + 2.*Y2)) + v4*(1233. + 8.*q2*(-47. + 150.*Y2)))) + 16.*(16. + v2*(24. + v2*(27.*(2. + 5.*v2) - 48.*q*v*Y + 4.*q2*(2. - v2 + (-2. + 3.*v2)*Y2)))))/(256.*v4); return eq; } __device__ double d_drdm(double v,double e,double Y,double q){ double v2=v*v; double v3=v2*v; double e2=e*e; double e4=e2*e2; double Y2=Y*Y; double q2=q*q; double eq=(16. + 8.*(-3. + e2)*v2 - 16.*(-3. + e2)*q*v3*Y + 8.*(33. + 4.*e2 - 3.*e4)*q*v3*v2*Y + v3*v3*(-351. + 132.*q2 + e2*(-135. + 21.*e2 + 5.*e4 + 2.*(7. + e2)*q2) + 2.*(-204. + 13.*e2*(-3. + e2))*q2*Y2) + 2.*v2*v2*(-45. + 3.*e4 + 4.*q2*(1. - 4.*Y2) + 2.*e2*q2*(1. + Y2)))/(16.*v); return eq; } __device__ double d_dthetadm(double v,double e,double Y,double q){ double v2=v*v; double v3=v2*v; double e2=e*e; double e4=e2*e2; double Y2=Y*Y; double q2=q*q; double eq=(16. + 8.*(3. + e2)*v2 - 16.*(3. + e2)*q*v3*Y - 8.*(3. + e2)*(5. + 3.*e2)*q*v3*v2*Y + v3*v3*(135. - 54.*q2 + e2*(5.*(27. + 9.*e2 + e4) + 2.*(-38. + e2)*q2) + 2.*(57. + 90.*e2 + 13.*e4)*q2*Y2) + 2.*v2*v2*(27. + 3.*e4 + 2.*q2*(-1. + 7.*Y2) + 2.*e2*(9. + q2*(1. + Y2))))/(16.*v); return eq; } __device__ double d_dphidm(double v,double e,double Y,double q){ double v2=v*v; double v3=v2*v; double e2=e*e; double e4=e2*e2; double Y2=Y*Y; double q2=q*q; double eq=(16. + 8.*(3. + e2)*v2 - 16.*q*v3*(-2. + (3. + e2)*Y) - 8.*q*v3*v2*(-6. + 15.*Y + 3.*e4*Y + 2.*e2*(-4. + 7.*Y)) + 2.*v2*v2*(27. + 3.*e4 + 2.*q2*(-1. + Y)*(1. + 7.*Y) + 2.*e2*(9. + q2*(1. + Y2))) + v3*v3*(5.*e4*e2 + e4*(45. + q2*(2. + 26.*Y2)) + e2*(135. + 4.*q2*(-19. + 5.*Y*(-7. + 9.*Y))) + 3.*(45. + 2.*q2*(-9. + Y*(-6. 
+ 19.*Y)))))/(16.*v); return eq; } __global__ void produce_phasing(double *e_out, double *v_out, double *M_out, double *S_out, double *gimdot_out, double *nu_out, double *alpdot_out, double *gim_out, double *Phi_out, double *alp_out, double *tvec, InterpArrayContainer evec, InterpArrayContainer vvec, InterpArrayContainer Mvec, InterpArrayContainer Svec, double lam, int init_length, double init_dt, double timestep, double t_clip, int run_length) { int index; double x, x2, x3; int i = blockIdx.x * blockDim.x + threadIdx.x; double t = timestep*i; if (t > t_clip) return; double tm = timestep*(i-1); if (i==0) tm = t; double coslam=cos(lam); double sinlam=sin(lam); find_index_and_xout(&index, &x, &x2, &x3, init_dt, tm, tvec, init_length); double e=interpolate_array(evec, x, x2, x3, index, timestep*i); //evec.array[i]; double v=interpolate_array(vvec, x, x2, x3, index, timestep*i); //vvec.array[i]; double M=interpolate_array(Mvec, x, x2, x3, index, timestep*i); //Mvec.array[i]; double S=interpolate_array(Svec, x, x2, x3, index, timestep*i); //Svec.array[i]; double gimdotm=(d_dthetadm(v,e,coslam,S)-d_drdm(v,e,coslam,S))/d_dtdm(v,e,coslam,S)/M; double Phidotm=d_drdm(v,e,coslam,S)/d_dtdm(v,e,coslam,S)/M; double alpdotm=(d_dphidm(v,e,coslam,S)-d_dthetadm(v,e,coslam,S))/d_dtdm(v,e,coslam,S)/M; find_index_and_xout(&index, &x, &x2, &x3, init_dt, t, tvec, init_length); e=interpolate_array(evec, x, x2, x3, index, timestep*i); //evec.array[i]; v=interpolate_array(vvec, x, x2, x3, index, timestep*i); //vvec.array[i]; M=interpolate_array(Mvec, x, x2, x3, index, timestep*i); //Mvec.array[i]; S=interpolate_array(Svec, x, x2, x3, index, timestep*i); //Svec.array[i]; double gimdot=(d_dthetadm(v,e,coslam,S)-d_drdm(v,e,coslam,S))/d_dtdm(v,e,coslam,S)/M; double Phidot=d_drdm(v,e,coslam,S)/d_dtdm(v,e,coslam,S)/M; double alpdot=(d_dphidm(v,e,coslam,S)-d_dthetadm(v,e,coslam,S))/d_dtdm(v,e,coslam,S)/M; //double nu=Phidot/2./M_PI; e_out[i] = e; v_out[i] = v; M_out[i] = M; S_out[i] = S; gimdot_out[i] = gimdot; nu_out[i] = Phidot/2./M_PI;; alpdot_out[i] = alpdot; if (i >= run_length-1) return; gim_out[i+1] = (1.5*gimdot-.5*gimdotm)*timestep; Phi_out[i+1] = (1.5*Phidot-.5*Phidotm)*timestep; alp_out[i+1] = (1.5*alpdot-.5*alpdotm)*timestep; //double nu=Phidot/2./M_PI; } __global__ void prescan0(double *arr, double arr0, double *temp) { *arr = arr0; *temp = 0.0; } __global__ void prescan1(double *g_idata, int n, double *temp, int num_sum_per_thread) { int start_ind = (blockIdx.x*blockDim.x + threadIdx.x)*num_sum_per_thread; if (start_ind >= n) return; int end_ind = start_ind + num_sum_per_thread - 1; if (end_ind >= n) end_ind = n-1; for(int i=start_ind; i<end_ind; i++) g_idata[i+1]+=g_idata[i]; int temp_ind = start_ind / num_sum_per_thread + 1; temp[temp_ind] = g_idata[end_ind]; } __global__ void prescan2(int n, double *temp, int num_sum_per_thread) { for(int i=0; i<n-1; i++) temp[i+1] += temp[i]; } __global__ void prescan3(double *g_idata, int n, double *temp, int num_sum_per_thread) { for(int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += blockDim.x*gridDim.x){ int temp_ind = i / num_sum_per_thread; g_idata[i] += temp[temp_ind]; } } #define gpuErrchk_kern(ans) { gpuAssert_kern((ans), __FILE__, __LINE__); } inline void gpuAssert_kern(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } void cumsum(double *data, double phase0, int n){ int num_sum_per_thread = 256; int NUM_THREADS 
= 256; int num_needed_threads = std::ceil(n/num_sum_per_thread); int num_blocks_prescan1 = std::ceil((num_needed_threads + 1 + NUM_THREADS -1)/NUM_THREADS); double *temp; gpuErrchk_kern(cudaMalloc(&temp, (num_needed_threads+1)*sizeof(double))); prescan0<<<1,1>>>(data, phase0, temp); cudaDeviceSynchronize(); gpuErrchk_kern(cudaGetLastError()); prescan1<<<num_blocks_prescan1, NUM_THREADS>>>(data, n, temp, num_sum_per_thread); cudaDeviceSynchronize(); gpuErrchk_kern(cudaGetLastError()); prescan2<<<1,1>>>(num_needed_threads+1, temp, num_sum_per_thread); cudaDeviceSynchronize(); gpuErrchk_kern(cudaGetLastError()); int num_blocks_prescan3 = std::ceil((n + 1 + NUM_THREADS -1)/NUM_THREADS); prescan3<<<num_blocks_prescan3, NUM_THREADS>>>(data, n, temp, num_sum_per_thread); cudaDeviceSynchronize(); gpuErrchk_kern(cudaGetLastError()); gpuErrchk_kern(cudaFree(temp)); } __global__ void kernel_create_waveform(double *t, double *hI, double *hII, double *tvec, double *evec, double *vvec, double *Mvec, double *Svec, double *gimvec, double *Phivec, double *alpvec, double *nuvec, double *gimdotvec, double lam, double qS, double phiS, double qK, double phiK, bool mich, int init_length, int vlength,int nmodes, int i_plunge, int i_buffer, double zeta, double M_phys, double init_dt, double timestep, int run_length){ int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= vlength) return; if (i >= run_length) { hI[i] = 0.0; hII[i] = 0.0; return; } // ---------- // TODO: calculate this first section before gpu double coslam=cos(lam); double sinlam=sin(lam); double cosqS=cos(qS); double sinqS=sin(qS); double cosqK=cos(qK); double sinqK=sin(qK); double cosphiK=cos(phiK); double sinphiK=sin(phiK); double halfsqrt3=sqrt(3.)/2.; // ----- compute waveform from t_start to t_end ----- //for(int i=0;i<vlength;i++){ double time = timestep*i; t[i]= time; double t_plunge=i_plunge*timestep; double t_zero=t_plunge+timestep*i_buffer; if (time <=t_zero){ hI[i]=0.; hII[i]=0.; double e=evec[i]; //evec.array[i]; double v=vvec[i]; //vvec.array[i]; double M=Mvec[i]; //Mvec.array[i]; double S=Svec[i]; //Svec.array[i]; double gim=gimvec[i]; //gimvec.array[i]; double Phi=Phivec[i]; //Phivec.array[i]; double alp=alpvec[i]; //alpvec.array[i]; double nu=nuvec[i]; //nuvec.array[i]; double gimdot=gimdotvec[i]; //gimdotvec.array[i]; /*# if __CUDA_ARCH__>=200 //if ((index >= 12000) && (index <= 12100)) //printf("%d, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e\n", i, e, t[i], tvec[index], tvec[index+1], evec.array[index], evec.coeff_1[index], evec.coeff_2[index], evec.coeff_3[index], x, x2, x3); printf("%d, %.18e, %.18e, %.18e, %.18e\n", i, t[i], e, nu, gimdot); #endif //*/ double cosalp=cos(alp); double sinalp=sin(alp); double cosqL=cosqK*coslam+sinqK*sinlam*cosalp; double sinqL=sqrt(1.-cosqL*cosqL); double phiLup=sinqK*sinphiK*coslam-cosphiK*sinlam*sinalp-cosqK*sinphiK*sinlam*cosalp; double phiLdown=sinqK*cosphiK*coslam+sinphiK*sinlam*sinalp-cosqK*cosphiK*sinlam*cosalp; double phiL=atan2(phiLup,phiLdown); double Ldotn=cosqL*cosqS+sinqL*sinqS*cos(phiL-phiS); double Ldotn2=Ldotn*Ldotn; double Sdotn=cosqK*cosqS+sinqK*sinqS*cos(phiK-phiS); double betaup=-Sdotn+coslam*Ldotn; double betadown=sinqS*sin(phiK-phiS)*sinlam*cosalp+(cosqK*Sdotn-cosqS)/sinqK*sinlam*sinalp; double beta=atan2(betaup,betadown); double gam=2.*(gim+beta); double cos2gam=cos(gam); double sin2gam=sin(gam); double orbphs,cosorbphs,sinorbphs,FplusI,FcrosI,FplusII,FcrosII; if(mich){ orbphs=2.*M_PI*t[i]/year; cosorbphs=cos(orbphs-phiS); 
sinorbphs=sin(orbphs-phiS); double cosq=.5*cosqS-halfsqrt3*sinqS*cosorbphs; double phiw=orbphs+atan2(halfsqrt3*cosqS+.5*sinqS*cosorbphs,sinqS*sinorbphs); double psiup=.5*cosqK-halfsqrt3*sinqK*cos(orbphs-phiK)-cosq*(cosqK*cosqS+sinqK*sinqS*cos(phiK-phiS)); double psidown=.5*sinqK*sinqS*sin(phiK-phiS)-halfsqrt3*cos(orbphs)*(cosqK*sinqS*sin(phiS)-cosqS*sinqK*sin(phiK))-halfsqrt3*sin(orbphs)*(cosqS*sinqK*cos(phiK)-cosqK*sinqS*cos(phiS)); double psi=atan2(psiup,psidown); double cosq1=.5*(1.+cosq*cosq); double cos2phi=cos(2.*phiw); double sin2phi=sin(2.*phiw); double cos2psi=cos(2.*psi); double sin2psi=sin(2.*psi); FplusI=cosq1*cos2phi*cos2psi-cosq*sin2phi*sin2psi; FcrosI=cosq1*cos2phi*sin2psi+cosq*sin2phi*cos2psi; FplusII=cosq1*sin2phi*cos2psi+cosq*cos2phi*sin2psi; FcrosII=cosq1*sin2phi*sin2psi-cosq*cos2phi*cos2psi; } else{ FplusI=1.; FcrosI=0.; FplusII=0.; FcrosII=1.; } double Amp=pow(d_OmegaPhi(v,e,coslam,S,M)*M_phys*SOLARMASSINSEC,2./3.)*zeta; // TODO: check making num modes to gridDim (then need to do reduction to get singular waveform) double fn,Doppler,nPhi; double ne, a, b, c, Aplus, Acros, Aplusold, Acrosold; double rot[4], J[5]; for(int n=1;n<=nmodes;n++){ if(mich){ fn=n*nu+gimdot/M_PI; Doppler=2.*M_PI*fn*AUsec*sinqS*cosorbphs; nPhi=n*Phi+Doppler; } else nPhi=n*Phi; ne=n*e; if(n==1){ J[0]=-1.0*j1(ne); J[1]=j0(ne); J[2]=j1(ne); J[3]=jn(2,ne); J[4]=jn(3,ne); } else{ J[0]=jn(n-2, ne); J[1]=jn(n-1, ne); J[2]=jn(n, ne); J[3]=jn(n+1,ne); J[4]=jn(n+2,ne); } a=-n*Amp*(J[0]-2.*e*J[1]+2./n*J[2]+2.*e*J[3]-J[4])*cos(nPhi); b=-n*Amp*sqrt(1-e*e)*(J[0]-2.*J[2]+J[4])*sin(nPhi); c=2.*Amp*J[2]*cos(nPhi); Aplus=-(1.+Ldotn2)*(a*cos2gam-b*sin2gam)+c*(1-Ldotn2); Acros=2.*Ldotn*(b*cos2gam+a*sin2gam); // ----- rotate to NK wave frame ----- Aplusold=Aplus; Acrosold=Acros; d_RotCoeff(rot,lam,qS,phiS,qK,phiK,alp); Aplus=Aplusold*rot[0]+Acrosold*rot[1]; Acros=Aplusold*rot[2]+Acrosold*rot[3]; // ---------- double hnI,hnII; if(mich){ hnI=halfsqrt3*(FplusI*Aplus+FcrosI*Acros); hnII=halfsqrt3*(FplusII*Aplus+FcrosII*Acros); } else{ hnI=FplusI*Aplus+FcrosI*Acros; hnII=FplusII*Aplus+FcrosII*Acros; } hI[i]+=hnI; hII[i]+=hnII; } } if ((time>t_plunge) &&(i<vlength)){ if(time<t_zero){ hI[i]=hI[i]/(exp((t_plunge-t_zero)/(t[i]-t_plunge)+(t_plunge-t_zero)/(t[i]-t_zero))+1.); hII[i]=hII[i]/(exp((t_plunge-t_zero)/(t[i]-t_plunge)+(t_plunge-t_zero)/(t[i]-t_zero))+1.); } else{ hI[i]=0.; hII[i]=0.; } // ---------- } /*# if __CUDA_ARCH__>=200 if (i == 1000) printf("%d, %.18e, %.18e\n", i, hI[i], hII[i]); #endif //*/ } __global__ void likelihood_prep(cuDoubleComplex *template_channel1, cuDoubleComplex *template_channel2, double *noise_channel1_inv, double *noise_channel2_inv, int length){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i >= length) return; /*# if __CUDA_ARCH__>=200 if (i == 1000) printf("%d, %.18e, %.18e, %.18e\n", i, cuCreal(template_channel1[i]), cuCimag(template_channel2[i]), noise_channel1_inv[i]); #endif //*/ template_channel1[i] = cuCmul(template_channel1[i], make_cuDoubleComplex(noise_channel1_inv[i], 0.0)); template_channel2[i] = cuCmul(template_channel2[i], make_cuDoubleComplex(noise_channel2_inv[i], 0.0)); }
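The cumsum() helper in the kernel.hip/kernel.cu pair above is a three-phase chunked inclusive scan: prescan1 scans each 256-element chunk in place and records the chunk total, prescan2 scans those totals serially, and prescan3 adds each chunk's offset back onto its elements. Its thread counts are computed as std::ceil of an integer division, which has already floored, so the tail chunk is only accounted for when n is a multiple of the chunk size. Below is a minimal self-contained sketch of the same pattern using plain integer ceiling division; the names (chunked_inclusive_scan, scan_chunks, scan_totals, add_offsets, CHUNK) are illustrative and not from the original, and the initial-phase seeding done by prescan0 is omitted.

#include <cuda_runtime.h>

#define CHUNK 256

// Phase 1: each thread scans one contiguous chunk in place and records its total.
__global__ void scan_chunks(double *d, int n, double *totals) {
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int start = c * CHUNK;
    if (start >= n) return;
    int end = start + CHUNK;
    if (end > n) end = n;
    for (int i = start + 1; i < end; i++) d[i] += d[i - 1];
    totals[c] = d[end - 1];
}

// Phase 2: serial exclusive scan of the per-chunk totals (the totals array is small).
__global__ void scan_totals(double *totals, int nchunks) {
    double run = 0.0;
    for (int c = 0; c < nchunks; c++) {
        double t = totals[c];
        totals[c] = run;      // exclusive prefix of chunk c
        run += t;
    }
}

// Phase 3: add each chunk's offset to every element of that chunk.
__global__ void add_offsets(double *d, int n, const double *totals) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        d[i] += totals[i / CHUNK];
}

void chunked_inclusive_scan(double *d_data, int n) {
    int nchunks = (n + CHUNK - 1) / CHUNK;            // integer ceiling division
    double *d_totals;
    cudaMalloc(&d_totals, nchunks * sizeof(double));
    int threads = 256;
    scan_chunks<<<(nchunks + threads - 1) / threads, threads>>>(d_data, n, d_totals);
    scan_totals<<<1, 1>>>(d_totals, nchunks);
    add_offsets<<<(n + threads - 1) / threads, threads>>>(d_data, n, d_totals);
    cudaDeviceSynchronize();                          // callers should also check cudaGetLastError()
    cudaFree(d_totals);
}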
8002a5ce89cbfecf9beec97286e0d6e89b1ebb90.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> // CUDA kernel. Each thread takes care of one element of c __global__ void vecAdd(int *a, int *c, int n) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // c[id]=0; // Make sure we do not go out of bounds if (id < n) *c+= a[id]; // printf("\n%d", c[id]); } int main( int argc, char* argv[] ) { // Size of vectors // int n = 100000; int n=5; const int size = n * sizeof(int); // Host input vectors int *h_a; // double *h_b; //Host output vector int *h_c; // Device input vectors int *d_a; //double *d_b; //Device output vector int *d_c; int dev=0; // Size, in bytes, of each vector size_t bytes = n*sizeof(double); // Allocate memory for each vector on host //h_a = (int*)malloc(bytes); //h_b = (double*)malloc(bytes); h_c = (int*)malloc(bytes); // Allocate memory for each vector on GPU hipMalloc(&d_a, bytes); // hipMalloc(&d_b, bytes); hipMalloc(&d_c, bytes); int i; printf("Input array"); // Initialize vectors on host /*for( i = 0; i < n; i++ ) { // h_a[i] = sin(i)*sin(i); //printf("\n",i); h_a[i]=i; //printf("\n%d", h_a[i]); //h_b[i]=i; //h_b[i] = cos(i)*cos(i); }*/ int a[]= {0, 1, 2, 3, 4}; hipMalloc(&h_a, size); // Copy host vectors to device hipMemcpy( h_a, a, bytes, hipMemcpyHostToDevice); hipMemcpy( d_c, &dev, sizeof(int), hipMemcpyHostToDevice); // hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice); int blockSize, gridSize; // Number of threads in each thread block blockSize = 2; // Number of thread blocks in grid gridSize = (int)ceil((float)n/blockSize); // Execute the kernel hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a,d_c,n); int result; // Copy array back to host hipMemcpy( &result,d_c, sizeof(int), hipMemcpyDeviceToHost ); // Sum up vector c and print result divided by n, this should equal 1 within error double sum = 0; //for(i=0; i<n; i++) // sum += h_c[i]; printf("final result: %f\n",result ); // vecdev<<<gridSize, blockSize>>>(d_a,d_c, n); // Release device memory hipFree(d_a); //hipFree(d_b); hipFree(d_c); // Release host memory free(h_a); //free(h_b); free(h_c); return 0; }
8002a5ce89cbfecf9beec97286e0d6e89b1ebb90.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> // CUDA kernel. Each thread takes care of one element of c __global__ void vecAdd(int *a, int *c, int n) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // c[id]=0; // Make sure we do not go out of bounds if (id < n) *c+= a[id]; // printf("\n%d", c[id]); } int main( int argc, char* argv[] ) { // Size of vectors // int n = 100000; int n=5; const int size = n * sizeof(int); // Host input vectors int *h_a; // double *h_b; //Host output vector int *h_c; // Device input vectors int *d_a; //double *d_b; //Device output vector int *d_c; int dev=0; // Size, in bytes, of each vector size_t bytes = n*sizeof(double); // Allocate memory for each vector on host //h_a = (int*)malloc(bytes); //h_b = (double*)malloc(bytes); h_c = (int*)malloc(bytes); // Allocate memory for each vector on GPU cudaMalloc(&d_a, bytes); // cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes); int i; printf("Input array"); // Initialize vectors on host /*for( i = 0; i < n; i++ ) { // h_a[i] = sin(i)*sin(i); //printf("\n",i); h_a[i]=i; //printf("\n%d", h_a[i]); //h_b[i]=i; //h_b[i] = cos(i)*cos(i); }*/ int a[]= {0, 1, 2, 3, 4}; cudaMalloc(&h_a, size); // Copy host vectors to device cudaMemcpy( h_a, a, bytes, cudaMemcpyHostToDevice); cudaMemcpy( d_c, &dev, sizeof(int), cudaMemcpyHostToDevice); // cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice); int blockSize, gridSize; // Number of threads in each thread block blockSize = 2; // Number of thread blocks in grid gridSize = (int)ceil((float)n/blockSize); // Execute the kernel vecAdd<<<gridSize, blockSize>>>(d_a,d_c,n); int result; // Copy array back to host cudaMemcpy( &result,d_c, sizeof(int), cudaMemcpyDeviceToHost ); // Sum up vector c and print result divided by n, this should equal 1 within error double sum = 0; //for(i=0; i<n; i++) // sum += h_c[i]; printf("final result: %f\n",result ); // vecdev<<<gridSize, blockSize>>>(d_a,d_c, n); // Release device memory cudaFree(d_a); //cudaFree(d_b); cudaFree(d_c); // Release host memory free(h_a); //free(h_b); free(h_c); return 0; }
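In the vecAdd pair above, the kernel accumulates every element into a single scalar with *c += a[id]; with three blocks of two threads those read-modify-writes race, and on the host side the int result is printed with %f and the cudaMalloc'd h_a is released with free(). A minimal race-free sketch of the same "sum a small int array on the GPU" idea follows; the kernel name sumAtomic and the launch geometry are illustrative choices, not part of the original file.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void sumAtomic(const int *a, int *c, int n) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) atomicAdd(c, a[id]);   // serialized read-modify-write, no data race
}

int main() {
    const int n = 5;
    int h_a[n] = {0, 1, 2, 3, 4};
    int h_c = 0;
    int *d_a, *d_c;
    cudaMalloc(&d_a, n * sizeof(int));
    cudaMalloc(&d_c, sizeof(int));
    cudaMemcpy(d_a, h_a, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, &h_c, sizeof(int), cudaMemcpyHostToDevice);
    sumAtomic<<<(n + 255) / 256, 256>>>(d_a, d_c, n);
    cudaMemcpy(&h_c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
    printf("sum = %d\n", h_c);         // 0+1+2+3+4 = 10; note the int format specifier
    cudaFree(d_a);
    cudaFree(d_c);
    return 0;
}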
410bb1bddf347764583873c4521d05cf1b65967c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../common/book.h" #define imin(a, b) (a<b?a:b) #define sum_squares(x) (x*(x+1)*(2*x+1)/6) const int N = 33 * 1024; const int threadsPerBlock = 256; const int blocksPerGrid = imin(32, (N+threadsPerBlock-1)/threadsPerBlock); __global__ void dot(float* a, float* b, float* c) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while(tid<N) { temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x/2; while(i!=0) { if(cacheIndex<i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if(cacheIndex == 0) c[blockIdx.x] = cache[0]; } int main() { float* a, *b, *partial_c, c; float* dev_a, *dev_b, *dev_partial_c; a = (float*)malloc(N*sizeof(float)); b = (float*)malloc(N*sizeof(float)); partial_c = (float*)malloc(blocksPerGrid*sizeof(float)); HANDLE_ERROR(hipMalloc((void**)&dev_a, N*sizeof(float))); HANDLE_ERROR(hipMalloc((void**)&dev_b, N*sizeof(float))); HANDLE_ERROR(hipMalloc((void**)&dev_partial_c, blocksPerGrid*sizeof(float))); for(int i=0; i<N; i++) { a[i] = i; b[i] = i * 2; } HANDLE_ERROR(hipMemcpy(dev_a, a, N*sizeof(float), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_b, b, N*sizeof(float), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( dot), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_c); HANDLE_ERROR(hipMemcpy(partial_c, dev_partial_c, blocksPerGrid*sizeof(float), hipMemcpyDeviceToHost)); c = 0; for(int i=0; i<blocksPerGrid; i++) c += partial_c[i]; printf("GPU value: %.6g\nCorrect value: %.6g\n", c, 2*sum_squares((float)(N-1))); hipFree(dev_a); hipFree(dev_b); hipFree(dev_partial_c); free(a); free(b); free(partial_c); }
410bb1bddf347764583873c4521d05cf1b65967c.cu
#include "../common/book.h" #define imin(a, b) (a<b?a:b) #define sum_squares(x) (x*(x+1)*(2*x+1)/6) const int N = 33 * 1024; const int threadsPerBlock = 256; const int blocksPerGrid = imin(32, (N+threadsPerBlock-1)/threadsPerBlock); __global__ void dot(float* a, float* b, float* c) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while(tid<N) { temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x/2; while(i!=0) { if(cacheIndex<i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if(cacheIndex == 0) c[blockIdx.x] = cache[0]; } int main() { float* a, *b, *partial_c, c; float* dev_a, *dev_b, *dev_partial_c; a = (float*)malloc(N*sizeof(float)); b = (float*)malloc(N*sizeof(float)); partial_c = (float*)malloc(blocksPerGrid*sizeof(float)); HANDLE_ERROR(cudaMalloc((void**)&dev_a, N*sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&dev_b, N*sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&dev_partial_c, blocksPerGrid*sizeof(float))); for(int i=0; i<N; i++) { a[i] = i; b[i] = i * 2; } HANDLE_ERROR(cudaMemcpy(dev_a, a, N*sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_b, b, N*sizeof(float), cudaMemcpyHostToDevice)); dot<<<blocksPerGrid,threadsPerBlock>>>(dev_a, dev_b, dev_partial_c); HANDLE_ERROR(cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost)); c = 0; for(int i=0; i<blocksPerGrid; i++) c += partial_c[i]; printf("GPU value: %.6g\nCorrect value: %.6g\n", c, 2*sum_squares((float)(N-1))); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_partial_c); free(a); free(b); free(partial_c); }
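The dot-product pair above does a grid-stride multiply-accumulate, a shared-memory tree reduction per block (which relies on threadsPerBlock being a power of two), and then sums the per-block partials on the host. A variant sketch that instead folds each block's partial into one device scalar with atomicAdd is shown below, assuming 256-thread blocks; dotAtomic and the launch shape are illustrative and not from the original code.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dotAtomic(const float *a, const float *b, float *result, int n) {
    __shared__ float cache[256];                      // assumes blockDim.x == 256 (a power of two)
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    float temp = 0.0f;
    for (; tid < n; tid += blockDim.x * gridDim.x)    // grid-stride loop, as in the original
        temp += a[tid] * b[tid];
    cache[threadIdx.x] = temp;
    __syncthreads();
    for (int i = blockDim.x / 2; i > 0; i >>= 1) {    // same shared-memory tree reduction
        if (threadIdx.x < i) cache[threadIdx.x] += cache[threadIdx.x + i];
        __syncthreads();
    }
    if (threadIdx.x == 0) atomicAdd(result, cache[0]); // fold the block result into the scalar
}

int main() {
    const int n = 33 * 1024;
    float *a = (float*)malloc(n * sizeof(float));
    float *b = (float*)malloc(n * sizeof(float));
    for (int i = 0; i < n; i++) { a[i] = i; b[i] = i * 2.0f; }
    float *d_a, *d_b, *d_res;
    cudaMalloc(&d_a, n * sizeof(float));
    cudaMalloc(&d_b, n * sizeof(float));
    cudaMalloc(&d_res, sizeof(float));
    cudaMemcpy(d_a, a, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(d_res, 0, sizeof(float));              // accumulator must start at zero
    dotAtomic<<<32, 256>>>(d_a, d_b, d_res, n);
    float c = 0.0f;
    cudaMemcpy(&c, d_res, sizeof(float), cudaMemcpyDeviceToHost);
    printf("dot = %.6g\n", c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_res);
    free(a); free(b);
    return 0;
}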
3c848b4d72f182e3ec4bcf856b0ca4d92146e520.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ///////////////////////////////////////////////////////////////////////////////// #include <matx.h> using namespace matx; int main() { using complex = cuda::std::complex<float>; index_t signal_size = 16; index_t filter_size = 3; index_t filtered_size = signal_size + filter_size - 1; // Create time domain buffers auto sig_time = make_tensor<complex>({signal_size}); auto filt_time = make_tensor<complex>({filter_size}); auto time_out = make_tensor<complex>({filtered_size}); // Frequency domain buffers auto sig_freq = make_tensor<complex>({filtered_size}); auto filt_freq = make_tensor<complex>({filtered_size}); // Fill the time domain signals with data for (index_t i = 0; i < signal_size; i++) { sig_time(i) = {-1.0f * (2.0f * static_cast<float>(i % 2) + 1.0f) * (static_cast<float>(i % 10) / 10.0f) + 0.1f, -1.0f * (static_cast<float>(i % 2) == 0.0f) * (static_cast<float>(i % 10) / 5.0f) - 0.1f}; } for (index_t i = 0; i < filter_size; i++) { filt_time(i) = {static_cast<float>(i) / static_cast<float>(filter_size), static_cast<float>(-i) / static_cast<float>(filter_size) + 0.5f}; } // TODO: Perform FFT convolution // Perform the FFT in-place on both signal and filter, do an element-wise multiply of the two, then IFFT that output // TODO: Perform a time-domain convolution hipStreamSynchronize(0); // Compare signals for (index_t i = 0; i < filtered_size; i++) { if ( fabs(time_out(i).real() - sig_freq(i).real()) > 0.001 || fabs(time_out(i).imag() - sig_freq(i).imag()) > 0.001) { printf("Verification failed at item %lld. Direct=%f%+.2fj, FFT=%f%+.2fj\n", i, time_out(i).real(), time_out(i).imag(), sig_freq(i).real(), sig_freq(i).imag()); return -1; } } std::cout << "Verification successful" << std::endl; return 0; }
3c848b4d72f182e3ec4bcf856b0ca4d92146e520.cu
//////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ///////////////////////////////////////////////////////////////////////////////// #include <matx.h> using namespace matx; int main() { using complex = cuda::std::complex<float>; index_t signal_size = 16; index_t filter_size = 3; index_t filtered_size = signal_size + filter_size - 1; // Create time domain buffers auto sig_time = make_tensor<complex>({signal_size}); auto filt_time = make_tensor<complex>({filter_size}); auto time_out = make_tensor<complex>({filtered_size}); // Frequency domain buffers auto sig_freq = make_tensor<complex>({filtered_size}); auto filt_freq = make_tensor<complex>({filtered_size}); // Fill the time domain signals with data for (index_t i = 0; i < signal_size; i++) { sig_time(i) = {-1.0f * (2.0f * static_cast<float>(i % 2) + 1.0f) * (static_cast<float>(i % 10) / 10.0f) + 0.1f, -1.0f * (static_cast<float>(i % 2) == 0.0f) * (static_cast<float>(i % 10) / 5.0f) - 0.1f}; } for (index_t i = 0; i < filter_size; i++) { filt_time(i) = {static_cast<float>(i) / static_cast<float>(filter_size), static_cast<float>(-i) / static_cast<float>(filter_size) + 0.5f}; } // TODO: Perform FFT convolution // Perform the FFT in-place on both signal and filter, do an element-wise multiply of the two, then IFFT that output // TODO: Perform a time-domain convolution cudaStreamSynchronize(0); // Compare signals for (index_t i = 0; i < filtered_size; i++) { if ( fabs(time_out(i).real() - sig_freq(i).real()) > 0.001 || fabs(time_out(i).imag() - sig_freq(i).imag()) > 0.001) { printf("Verification failed at item %lld. Direct=%f%+.2fj, FFT=%f%+.2fj\n", i, time_out(i).real(), time_out(i).imag(), sig_freq(i).real(), sig_freq(i).imag()); return -1; } } std::cout << "Verification successful" << std::endl; return 0; }
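The MatX exercise above leaves two TODOs: an FFT-based convolution (zero-pad both inputs to signal_size + filter_size - 1 samples, forward-FFT, multiply element-wise, inverse-FFT) and a direct time-domain convolution to verify against. Rather than guess at MatX's operator API, here is a plain-CUDA sketch of just the time-domain reference using cuComplex; conv_full and its parameter names are illustrative.

#include <cuComplex.h>

// Full (length ns + nf - 1) complex convolution: out[i] = sum_k filt[k] * sig[i - k].
__global__ void conv_full(const cuFloatComplex *sig, int ns,
                          const cuFloatComplex *filt, int nf,
                          cuFloatComplex *out) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;    // output index, 0 .. ns + nf - 2
    if (i >= ns + nf - 1) return;
    cuFloatComplex acc = make_cuFloatComplex(0.0f, 0.0f);
    for (int k = 0; k < nf; k++) {
        int j = i - k;                                // signal sample paired with filt[k]
        if (j >= 0 && j < ns)
            acc = cuCaddf(acc, cuCmulf(sig[j], filt[k]));
    }
    out[i] = acc;
}
// The FFT path should reproduce these filtered_size samples within the 1e-3 tolerance used in
// the verification loop above, once the inverse transform is normalized appropriately (for
// example, cuFFT's inverse is unscaled and needs a 1/filtered_size factor).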
159e3455e0f807886341b80cfa24a82cce3465ab.hip
// !!! This is a file automatically generated by hipify!!! #include "THHTensor.h" hipTextureObject_t THCudaTensor_getTextureObject(THCState *state, THCudaTensor *self) { hipTextureObject_t texObj; struct hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeLinear; resDesc.res.linear.devPtr = THCudaTensor_data(state, self); resDesc.res.linear.sizeInBytes = THCudaTensor_nElement(state, self) * 4; resDesc.res.linear.desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); struct hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); hipCreateTextureObject(&texObj, &resDesc, &texDesc, NULL); hipError_t errcode = hipGetLastError(); if(errcode != hipSuccess) { if (THCudaTensor_nElement(state, self) > 2>>27) THError("Failed to create texture object, " "nElement:%ld exceeds 27-bit addressing required for tex1Dfetch. Cuda Error: %s", THCudaTensor_nElement(state, self), hipGetErrorString(errcode)); else THError("Failed to create texture object: %s", hipGetErrorString(errcode)); } return texObj; } THC_API int THCudaTensor_getDevice(THCState* state, const THCudaTensor* thc) { if (!thc->storage) return -1; hipPointerAttribute_t attr; THCudaCheck(hipPointerGetAttributes(&attr, thc->storage->data)); return attr.device; }
159e3455e0f807886341b80cfa24a82cce3465ab.cu
#include "THCTensor.h" cudaTextureObject_t THCudaTensor_getTextureObject(THCState *state, THCudaTensor *self) { cudaTextureObject_t texObj; struct cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeLinear; resDesc.res.linear.devPtr = THCudaTensor_data(state, self); resDesc.res.linear.sizeInBytes = THCudaTensor_nElement(state, self) * 4; resDesc.res.linear.desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); struct cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL); cudaError errcode = cudaGetLastError(); if(errcode != cudaSuccess) { if (THCudaTensor_nElement(state, self) > 2>>27) THError("Failed to create texture object, " "nElement:%ld exceeds 27-bit addressing required for tex1Dfetch. Cuda Error: %s", THCudaTensor_nElement(state, self), cudaGetErrorString(errcode)); else THError("Failed to create texture object: %s", cudaGetErrorString(errcode)); } return texObj; } THC_API int THCudaTensor_getDevice(THCState* state, const THCudaTensor* thc) { if (!thc->storage) return -1; cudaPointerAttributes attr; THCudaCheck(cudaPointerGetAttributes(&attr, thc->storage->data)); return attr.device; }
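The THCTensor pair above wraps a tensor's linear float storage in a cudaTextureObject_t; note that its overflow branch compares nElement against 2>>27, which evaluates to 0, while the error message suggests the intended bound was the 2^27-element tex1Dfetch limit (1<<27). A standalone sketch of the same create-and-read pattern, with illustrative names, is below.

#include <cstring>
#include <cuda_runtime.h>

// Read element i of the linear buffer through the texture path.
__global__ void read_tex(cudaTextureObject_t tex, float *out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = tex1Dfetch<float>(tex, i);
}

// Bind an existing device float buffer as a 1D linear texture object.
cudaTextureObject_t make_linear_float_tex(float *d_buf, size_t n_elems) {
    cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr = d_buf;
    resDesc.res.linear.sizeInBytes = n_elems * sizeof(float);
    resDesc.res.linear.desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);

    cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));              // zeroed: element-type reads, no filtering

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);  // check cudaGetLastError() afterwards
    return tex;                                        // release later with cudaDestroyTextureObject(tex)
}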
ef3cce1dec2ec53360f88720887700e35d1f4b10.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <errno.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include "error.h" #include "f5c.h" #include "f5cmisc.cuh" #include "f5cmisc.h" void init_cuda(core_t* core){ cuda_exists(); int32_t cuda_device_num = core->opt.cuda_dev_id; hipDeviceProp_t prop; hipGetDeviceProperties(&prop, cuda_device_num); CUDA_CHK(); STDERR("Running on %s (device id %d)",prop.name, cuda_device_num); //fprintf(stderr,"AVG_EVENTS_PER_KMER %f\n",AVG_EVENTS_PER_KMER); //fprintf(stderr,"AVG_EVENTS_PER_KMER %f\n",AVG_EVENTS_PER_KMER_GPU_THRESH); //fprintf(stderr,"readfac %f\n",core->opt.cuda_max_readlen); assert(AVG_EVENTS_PER_KMER>0 && AVG_EVENTS_PER_KMER>0); core->cuda = (cuda_data_t*)malloc(sizeof(cuda_data_t)); MALLOC_CHK(core->cuda); core->align_kernel_time=0; core->align_pre_kernel_time=0; core->align_core_kernel_time=0; core->align_post_kernel_time=0; core->align_cuda_malloc=0; core->extra_load_cpu=0; core->align_cuda_memcpy=0; core->align_cuda_postprocess=0; core->align_cuda_preprocess=0; core->previous = -1; core->previous_count = 0; #ifdef CUDA_PRE_MALLOC int32_t n_bam_rec = core->opt.batch_size; //cpu arrays core->cuda->read_ptr_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(core->cuda->read_ptr_host); core->cuda->n_events_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(core->cuda->n_events_host); core->cuda->event_ptr_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(core->cuda->event_ptr_host); core->cuda->read_len_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(core->cuda->read_len_host); core->cuda->scalings_host = (scalings_t*)malloc(sizeof(scalings_t) * n_bam_rec); MALLOC_CHK(core->cuda->scalings_host); core->cuda->n_event_align_pairs_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(core->cuda->n_event_align_pairs_host); //cuda arrays if(core->opt.verbosity>1) print_size("read_ptr array",n_bam_rec * sizeof(int32_t)); hipMalloc((void**)&(core->cuda->read_ptr), n_bam_rec * sizeof(int32_t)); CUDA_CHK(); if(core->opt.verbosity>1) print_size("read_lens",n_bam_rec * sizeof(int32_t)); hipMalloc((void**)&(core->cuda->read_len), n_bam_rec * sizeof(int32_t)); CUDA_CHK(); //n_events if(core->opt.verbosity>1) print_size("n_events",n_bam_rec * sizeof(int32_t)); hipMalloc((void**)&(core->cuda->n_events), n_bam_rec * sizeof(int32_t)); CUDA_CHK(); //event ptr if(core->opt.verbosity>1) print_size("event ptr",n_bam_rec * sizeof(int32_t)); hipMalloc((void**)&(core->cuda->event_ptr), n_bam_rec * sizeof(int32_t)); CUDA_CHK(); //scalings : already linear if(core->opt.verbosity>1) print_size("Scalings",n_bam_rec * sizeof(scalings_t)); hipMalloc((void**)&(core->cuda->scalings), n_bam_rec * sizeof(scalings_t)); CUDA_CHK(); hipMalloc((void**)&(core->cuda->model), NUM_KMER * sizeof(model_t)); CUDA_CHK(); if(core->opt.verbosity>1) print_size("n_event_align_pairs",n_bam_rec * sizeof(int32_t)); hipMalloc((void**)&(core->cuda->n_event_align_pairs), n_bam_rec * sizeof(int32_t)); CUDA_CHK(); //model : already linear //move to cuda_init hipMemcpy(core->cuda->model, core->model, NUM_KMER * sizeof(model_t), hipMemcpyHostToDevice); CUDA_CHK(); #ifndef CUDA_DYNAMIC_MALLOC // //dynamic arrays //compute the maximum uint64_t free_mem = 0; if(prop.integrated==1){ //in tegra free mem should be sought differently free_mem=tegra_freemem(cuda_device_num); } else{ free_mem=cuda_freemem(cuda_device_num); } double factor 
= 1 * sizeof(char) + //read_capacity AVG_EVENTS_PER_KMER * sizeof(event_t) + //event_table_capacity 1 * sizeof(model_t) + //model_kmer_cache_capacity (AVG_EVENTS_PER_KMER * 2) * sizeof(AlignedPair) + //event_align_pairs_capacity (AVG_EVENTS_PER_KMER + 1) * ALN_BANDWIDTH * sizeof(float) + //bands_capacity (AVG_EVENTS_PER_KMER + 1) * ALN_BANDWIDTH * sizeof(uint8_t) + //trace_capacity (AVG_EVENTS_PER_KMER + 1) * sizeof(EventKmerPair) ; //band_lower_left_capacity uint64_t sum_read_len = 0; //if unset by user (or set to weird values by user) if(core->opt.cuda_mem_frac>=1.0f || core->opt.cuda_mem_frac<=0.0f){ if(prop.integrated==1){ //for tegra we have to reserve some space for RAM sum_read_len= floor(free_mem*TEGRA_MEM_FACTOR/factor); } else{ sum_read_len= floor(free_mem*MEM_FACTOR/factor); } } else{ sum_read_len= floor(free_mem*(core->opt.cuda_mem_frac)/factor); } core->cuda->max_sum_read_len = sum_read_len; uint64_t sum_n_events = floor(sum_read_len * AVG_EVENTS_PER_KMER); uint64_t read_capacity = sum_read_len * sizeof(char); uint64_t event_table_capacity = sum_n_events * sizeof(event_t); uint64_t model_kmer_cache_capacity= sum_read_len * sizeof(model_t); uint64_t event_align_pairs_capacity= sum_n_events * 2 * sizeof(AlignedPair); uint64_t bands_capacity = (sum_n_events + sum_read_len) * ALN_BANDWIDTH * sizeof(float) ; uint64_t trace_capacity = (sum_n_events + sum_read_len) * ALN_BANDWIDTH * sizeof(uint8_t) ; uint64_t band_lower_left_capacity = (sum_n_events + sum_read_len) * sizeof(EventKmerPair); assert(read_capacity + event_table_capacity + model_kmer_cache_capacity + event_align_pairs_capacity + bands_capacity + trace_capacity + band_lower_left_capacity <= free_mem); if(core->opt.verbosity>1) print_size("read_capacity",read_capacity); if(core->opt.verbosity>1) print_size("event_table_capacity",event_table_capacity); if(core->opt.verbosity>1) print_size("model_kmer_cache_capacity",model_kmer_cache_capacity); if(core->opt.verbosity>1) print_size("event_align_pairs_capacity",event_align_pairs_capacity); if(core->opt.verbosity>1) print_size("bands_capacity",bands_capacity); if(core->opt.verbosity>1) print_size("trace_capacity",trace_capacity); if(core->opt.verbosity>1) print_size("band_lower_left_capacity",band_lower_left_capacity); //input arrays hipMalloc((void**)&(core->cuda->read), read_capacity); //with null char CUDA_CHK(); hipMalloc((void**)&(core->cuda->event_table), event_table_capacity); CUDA_CHK(); hipMalloc((void**)&(core->cuda->model_kmer_cache), model_kmer_cache_capacity); CUDA_CHK(); /**allocate output arrays for cuda**/ hipMalloc((void**)&(core->cuda->event_align_pairs),event_align_pairs_capacity); //todo : need better huristic CUDA_CHK(); //scratch arrays hipMalloc((void**)&(core->cuda->bands), bands_capacity); CUDA_CHK(); hipMalloc((void**)&(core->cuda->trace), trace_capacity); CUDA_CHK(); hipMalloc((void**)&(core->cuda->band_lower_left), band_lower_left_capacity); CUDA_CHK(); STDERR("Max GPU capacity %.1fM bases",core->cuda->max_sum_read_len/(1000.0*1000.0)); #endif #endif return; } void free_cuda(core_t* core){ #ifdef CUDA_PRE_MALLOC free(core->cuda->event_ptr_host); free(core->cuda->n_events_host); free(core->cuda->read_ptr_host); free(core->cuda->read_len_host); free(core->cuda->scalings_host); free(core->cuda->n_event_align_pairs_host); hipFree(core->cuda->read_ptr); hipFree(core->cuda->read_len); hipFree(core->cuda->n_events); hipFree(core->cuda->event_ptr); hipFree(core->cuda->model); //constant memory hipFree(core->cuda->scalings); 
hipFree(core->cuda->n_event_align_pairs); #ifndef CUDA_DYNAMIC_MALLOC hipFree(core->cuda->read); hipFree(core->cuda->event_table); hipFree(core->cuda->model_kmer_cache); hipFree(core->cuda->event_align_pairs); hipFree(core->cuda->bands); hipFree(core->cuda->trace); hipFree(core->cuda->band_lower_left); #endif #endif free(core->cuda); return; } #ifndef CPU_GPU_PROC void align_cuda(core_t* core, db_t* db) { int32_t i; int32_t n_bam_rec = db->n_bam_rec; double realtime1; /**cuda pointers*/ char* read; //flattened reads sequences int32_t* read_ptr; //index pointer for flattedned "reads" int32_t* read_len; int64_t sum_read_len; int32_t* n_events; event_t* event_table; int32_t* event_ptr; int64_t sum_n_events; scalings_t* scalings; AlignedPair* event_align_pairs; int32_t* n_event_align_pairs; float *bands; uint8_t *trace; EventKmerPair* band_lower_left; realtime1 = realtime(); #ifdef CUDA_PRE_MALLOC int32_t* read_ptr_host = core->cuda->read_ptr_host; #else //get the total size and create the pointers int32_t* read_ptr_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(read_ptr_host); #endif sum_read_len = 0; //read sequences : needflattening for (i = 0; i < n_bam_rec; i++) { read_ptr_host[i] = sum_read_len; sum_read_len += (db->read_len[i] + 1); //with null term } //form the temporary flattened array on host char* read_host = (char*)malloc(sizeof(char) * sum_read_len); MALLOC_CHK(read_host); for (i = 0; i < n_bam_rec; i++) { int32_t idx = read_ptr_host[i]; strcpy(&read_host[idx], db->read[i]); } //now the events : need flattening //num events : need flattening //get the total size and create the pointers #ifdef CUDA_PRE_MALLOC int32_t* n_events_host = core->cuda->n_events_host; int32_t* event_ptr_host = core->cuda->event_ptr_host; #else int32_t* n_events_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(n_events_host); int32_t* event_ptr_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(event_ptr_host); #endif sum_n_events = 0; for (i = 0; i < n_bam_rec; i++) { n_events_host[i] = db->et[i].n; event_ptr_host[i] = sum_n_events; sum_n_events += db->et[i].n; } //event table flatten //form the temporary flattened array on host event_t* event_table_host = (event_t*)malloc(sizeof(event_t) * sum_n_events); MALLOC_CHK(event_table_host); for (i = 0; i < n_bam_rec; i++) { int32_t idx = event_ptr_host[i]; memcpy(&event_table_host[idx], db->et[i].event, sizeof(event_t) * db->et[i].n); } AlignedPair* event_align_pairs_host = (AlignedPair*)malloc(2 * sum_n_events * sizeof(AlignedPair)); MALLOC_CHK(event_align_pairs_host); core->align_cuda_preprocess += (realtime() - realtime1); /** Start GPU mallocs**/ realtime1 = realtime(); #ifdef CUDA_PRE_MALLOC read_ptr =core->cuda->read_ptr; read_len=core->cuda->read_len; n_events=core->cuda->n_events; event_ptr=core->cuda->event_ptr; scalings=core->cuda->scalings; model_t* model = core->cuda->model; #else if(core->opt.verbosity>1) print_size("read_ptr array",n_bam_rec * sizeof(int32_t)); hipMalloc((void**)&read_ptr, n_bam_rec * sizeof(int32_t)); CUDA_CHK(); if(core->opt.verbosity>1) print_size("read_lens",n_bam_rec * sizeof(int32_t)); hipMalloc((void**)&read_len, n_bam_rec * sizeof(int32_t)); CUDA_CHK(); //n_events if(core->opt.verbosity>1) print_size("n_events",n_bam_rec * sizeof(int32_t)); hipMalloc((void**)&n_events, n_bam_rec * sizeof(int32_t)); CUDA_CHK(); //event ptr if(core->opt.verbosity>1) print_size("event ptr",n_bam_rec * sizeof(int32_t)); hipMalloc((void**)&event_ptr, n_bam_rec * sizeof(int32_t)); CUDA_CHK(); 
//scalings : already linear if(core->opt.verbosity>1) print_size("Scalings",n_bam_rec * sizeof(scalings_t)); hipMalloc((void**)&scalings, n_bam_rec * sizeof(scalings_t)); CUDA_CHK(); //model : already linear model_t* model; hipMalloc((void**)&model, NUM_KMER * sizeof(model_t)); CUDA_CHK(); #endif if(core->opt.verbosity>1) print_size("read array",sum_read_len * sizeof(char)); hipMalloc((void**)&read, sum_read_len * sizeof(char)); //with null char CUDA_CHK(); if(core->opt.verbosity>1) print_size("event table",sum_n_events * sizeof(event_t)); hipMalloc((void**)&event_table, sum_n_events * sizeof(event_t)); CUDA_CHK(); model_t* model_kmer_cache; hipMalloc((void**)&model_kmer_cache, sum_read_len * sizeof(model_t)); CUDA_CHK(); /**allocate output arrays for cuda**/ if(core->opt.verbosity>1) print_size("event align pairs",2 * sum_n_events *sizeof(AlignedPair)); hipMalloc((void**)&event_align_pairs, 2 * sum_n_events * sizeof(AlignedPair)); //todo : need better huristic CUDA_CHK(); #ifdef CUDA_PRE_MALLOC n_event_align_pairs=core->cuda->n_event_align_pairs; #else if(core->opt.verbosity>1) print_size("n_event_align_pairs",n_bam_rec * sizeof(int32_t)); hipMalloc((void**)&n_event_align_pairs, n_bam_rec * sizeof(int32_t)); CUDA_CHK(); #endif //scratch arrays size_t sum_n_bands = sum_n_events + sum_read_len; //todo : can be optimised if(core->opt.verbosity>1) print_size("bands",sizeof(float) * sum_n_bands * ALN_BANDWIDTH); hipMalloc((void**)&bands,sizeof(float) * sum_n_bands * ALN_BANDWIDTH); CUDA_CHK(); if(core->opt.verbosity>1) print_size("trace",sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH); hipMalloc((void**)&trace, sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH); CUDA_CHK(); hipMemset(trace,0,sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH); //initialise the trace array to 0 if(core->opt.verbosity>1) print_size("band_lower_left",sizeof(EventKmerPair)* sum_n_bands); hipMalloc((void**)&band_lower_left, sizeof(EventKmerPair)* sum_n_bands); CUDA_CHK(); core->align_cuda_malloc += (realtime() - realtime1); /* cuda mem copys*/ realtime1 =realtime(); hipMemcpy(read_ptr, read_ptr_host, n_bam_rec * sizeof(int32_t), hipMemcpyHostToDevice); CUDA_CHK(); hipMemcpy(read, read_host, sum_read_len * sizeof(char), hipMemcpyHostToDevice); CUDA_CHK(); //read length : already linear hence direct copy hipMemcpy(read_len, db->read_len, n_bam_rec * sizeof(int32_t), hipMemcpyHostToDevice); CUDA_CHK(); hipMemcpy(n_events, n_events_host, n_bam_rec * sizeof(int32_t), hipMemcpyHostToDevice); CUDA_CHK(); hipMemcpy(event_ptr, event_ptr_host, n_bam_rec * sizeof(int32_t), hipMemcpyHostToDevice); CUDA_CHK(); hipMemcpy(event_table, event_table_host, sizeof(event_t) * sum_n_events, hipMemcpyHostToDevice); CUDA_CHK(); #ifndef CUDA_PRE_MALLOC //model : already linear //move to cuda_init hipMemcpy(model, core->model, NUM_KMER * sizeof(model_t), hipMemcpyHostToDevice); CUDA_CHK(); #endif //can be interleaved hipMemcpy(scalings, db->scalings, sizeof(scalings_t) * n_bam_rec, hipMemcpyHostToDevice); CUDA_CHK(); core->align_cuda_memcpy += (realtime() - realtime1); realtime1 = realtime(); /*pre kernel*/ assert(BLOCK_LEN_BANDWIDTH>=ALN_BANDWIDTH); dim3 gridpre(1,(db->n_bam_rec + BLOCK_LEN_READS - 1) / BLOCK_LEN_READS); dim3 blockpre(BLOCK_LEN_BANDWIDTH,BLOCK_LEN_READS); if(core->opt.verbosity>1) fprintf(stderr,"grid %d,%d, block %d,%d\n",gridpre.x,gridpre.y, blockpre.x,blockpre.y); hipLaunchKernelGGL(( align_kernel_pre_2d), dim3(gridpre), dim3(blockpre), 0, 0, read, read_len, read_ptr, n_events, event_ptr, model, n_bam_rec, 
model_kmer_cache,bands,trace,band_lower_left); hipDeviceSynchronize();CUDA_CHK(); if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3f*%.2f] align-pre kernel done\n", __func__, realtime() - realtime1, cputime() / (realtime() - realtime1)); core->align_kernel_time += (realtime() - realtime1); core->align_pre_kernel_time += (realtime() - realtime1); realtime1 = realtime(); /* core kernel*/ assert(BLOCK_LEN_BANDWIDTH>=ALN_BANDWIDTH); dim3 grid1(1,(db->n_bam_rec + BLOCK_LEN_READS - 1) / BLOCK_LEN_READS); dim3 block1(BLOCK_LEN_BANDWIDTH,BLOCK_LEN_READS); hipLaunchKernelGGL(( align_kernel_core_2d_shm), dim3(grid1), dim3(block1), 0, 0, read_len, read_ptr, event_table, n_events, event_ptr, scalings, n_bam_rec, model_kmer_cache,bands,trace,band_lower_left ); hipDeviceSynchronize();CUDA_CHK(); if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3f*%.2f] align-core kernel done\n", __func__, realtime() - realtime1, cputime() / (realtime() - realtime1)); core->align_kernel_time += (realtime() - realtime1); core->align_core_kernel_time += (realtime() - realtime1); realtime1 = realtime(); /*post kernel*/ int32_t BLOCK_LEN = core->opt.cuda_block_size; dim3 gridpost((db->n_bam_rec + BLOCK_LEN - 1) / BLOCK_LEN); dim3 blockpost(BLOCK_LEN); #ifndef WARP_HACK hipLaunchKernelGGL(( align_kernel_post), dim3(gridpost), dim3(blockpost), 0, 0, event_align_pairs, n_event_align_pairs, read_len, read_ptr, event_table, n_events, event_ptr,scalings, n_bam_rec, model_kmer_cache,bands,trace,band_lower_left ); #else assert(BLOCK_LEN>=32); dim3 grid1post((db->n_bam_rec + (BLOCK_LEN/32) - 1) / (BLOCK_LEN/32)); if(core->opt.verbosity>1) fprintf(stderr,"grid new %d\n",grid1post.x); hipLaunchKernelGGL(( align_kernel_post), dim3(grid1post), dim3(blockpost), 0, 0, event_align_pairs, n_event_align_pairs, read_len, read_ptr, event_table, n_events, event_ptr, scalings, n_bam_rec, model_kmer_cache,bands,trace,band_lower_left ); #endif hipDeviceSynchronize();CUDA_CHK(); if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3f*%.2f] align-post kernel done\n", __func__, realtime() - realtime1, cputime() / (realtime() - realtime1)); core->align_kernel_time += (realtime() - realtime1); core->align_post_kernel_time += (realtime() - realtime1); //fprintf(stderr,"readlen %d,n_events %d\n",db->read_len[i],n_event_align_pairs); #ifdef CUDA_DEBUG hipDeviceSynchronize(); CUDA_CHK(); #endif /** copyback ans**/ realtime1 = realtime(); hipMemcpy(db->n_event_align_pairs, n_event_align_pairs, n_bam_rec * sizeof(int32_t), hipMemcpyDeviceToHost); CUDA_CHK(); hipMemcpy(event_align_pairs_host, event_align_pairs, 2 * sum_n_events * sizeof(AlignedPair), hipMemcpyDeviceToHost); CUDA_CHK(); core->align_cuda_memcpy += (realtime() - realtime1); realtime1 = realtime(); #ifndef CUDA_PRE_MALLOC hipFree(read_ptr); hipFree(read_len); hipFree(n_events); hipFree(event_ptr); hipFree(model); //constant memory hipFree(scalings); hipFree(n_event_align_pairs); #endif hipFree(read); //with null char hipFree(event_table); hipFree(event_align_pairs); hipFree(bands); hipFree(trace); hipFree(band_lower_left); hipFree(model_kmer_cache); core->align_cuda_malloc += (realtime() - realtime1); /** post work**/ realtime1 = realtime(); //copy back for (i = 0; i < n_bam_rec; i++) { int32_t idx = event_ptr_host[i]; memcpy(db->event_align_pairs[i], &event_align_pairs_host[idx * 2], sizeof(AlignedPair) * db->n_event_align_pairs[i]); } //free the temp arrays on host #ifndef CUDA_PRE_MALLOC free(read_ptr_host); free(n_events_host); free(event_ptr_host); #endif free(read_host); 
free(event_table_host); free(event_align_pairs_host); core->align_cuda_postprocess += (realtime() - realtime1); } #else #ifdef WORK_STEAL static inline int32_t steal_work(pthread_arg_t* all_args, int32_t n_threads) { int32_t i, c_i = -1; int32_t k; for (i = 0; i < n_threads; ++i){ pthread_arg_t args = all_args[i]; //fprintf(stderr,"endi : %d, starti : %d\n",args.endi,args.starti); if (args.endi-args.starti > STEAL_THRESH_CUDA) { //fprintf(stderr,"gap : %d\n",args.endi-args.starti); c_i = i; break; } } if(c_i<0){ return -1; } k = __sync_fetch_and_add(&(all_args[c_i].starti), 1); //fprintf(stderr,"k : %d, end %d, start %d\n",k,all_args[c_i].endi,all_args[c_i].starti); return k >= all_args[c_i].endi ? -1 : k; } #endif void* pthread_cusingle(void* voidargs) { int32_t i,j; pthread_arg_t* args = (pthread_arg_t*)voidargs; db_t* db = args->db; core_t* core = args->core; #ifndef WORK_STEAL for (i = args->starti; i < args->endi; i++) { j=args->ultra_long_reads[i]; args->func(core,db,j); } #else pthread_arg_t* all_args = (pthread_arg_t*)(args->all_pthread_args); //adapted from ktherad for (;;) { i = __sync_fetch_and_add(&args->starti, 1); if (i >= args->endi) { break; } j=args->ultra_long_reads[i]; args->func(core,db,j); } while ((i = steal_work(all_args,core->opt.num_thread)) >= 0){ j=args->ultra_long_reads[i]; args->func(core,db,j); } #endif //fprintf(stderr,"Thread %d done\n",(myargs->position)/THREADS); pthread_exit(0); } void pthread_cudb(core_t* core, db_t* db, int32_t* ultra_long_reads, int32_t n_ultra_long_reads,void (*func)(core_t*,db_t*,int)){ //create threads pthread_t tids[core->opt.num_thread]; pthread_arg_t pt_args[core->opt.num_thread]; int32_t t, ret; int32_t i = 0; int32_t num_thread = core->opt.num_thread; int32_t step = (n_ultra_long_reads + num_thread - 1) / num_thread; //todo : check for higher num of threads than the data //current works but many threads are created despite //set the data structures for (t = 0; t < num_thread; t++) { pt_args[t].core = core; pt_args[t].db = db; pt_args[t].starti = i; i += step; if (i > n_ultra_long_reads) { pt_args[t].endi = n_ultra_long_reads; } else { pt_args[t].endi = i; } pt_args[t].func=func; pt_args[t].ultra_long_reads=ultra_long_reads; #ifdef WORK_STEAL pt_args[t].all_pthread_args = (void *)pt_args; #endif //fprintf(stderr,"t%d : %d-%d\n",t,pt_args[t].starti,pt_args[t].endi); } //create threads for(t = 0; t < core->opt.num_thread; t++){ ret = pthread_create(&tids[t], NULL, pthread_cusingle, (void*)(&pt_args[t])); NEG_CHK(ret); } //pthread joining for (t = 0; t < core->opt.num_thread; t++) { int ret = pthread_join(tids[t], NULL); NEG_CHK(ret); } } void* align_cudb(void* voidargs){ double realtime1 = realtime(); pthread_arg_t* args = (pthread_arg_t*)voidargs; db_t* db = args->db; core_t* core = args->core; int32_t* ultra_long_reads = args->ultra_long_reads; int32_t n_ultra_long_reads = args->endi; //fprintf(stderr,"ultra long guys : %d\n",n_ultra_long_reads); //fprintf(stderr, "cpu\n"); if (core->opt.num_thread == 1) { int j; for(j=0;j<n_ultra_long_reads;j++) { int32_t i = ultra_long_reads[j]; align_single(core, db, i); // db->n_event_align_pairs[i] = // align(db->event_align_pairs[i], db->read[i], // db->read_len[i], db->et[i], core->model, // db->scalings[i], db->f5[i]->sample_rate); //fprintf(stderr,"readlen %d,n_events %d\n",db->read_len[i],n_event_align_pairs); } } else { pthread_cudb(core, db, ultra_long_reads,n_ultra_long_reads,align_single); } args->ret1 = realtime() - realtime1; if(core->opt.verbosity>1) fprintf(stderr, 
"[%s::%.3fsec] %d reads processed on cpu\n", __func__, realtime() - realtime1, n_ultra_long_reads); return NULL; } pthread_t align_cudb_async(pthread_arg_t **pt_args_ptr,core_t* core, db_t* db, int32_t* ultra_long_reads, int32_t n_ultra_long_reads) { assert(*pt_args_ptr==NULL); *pt_args_ptr = (pthread_arg_t *)malloc(sizeof(pthread_arg_t)); pthread_arg_t *pt_args=*pt_args_ptr; MALLOC_CHK(pt_args); pt_args->core = core; pt_args->db = db; pt_args->starti = 0; pt_args->endi = n_ultra_long_reads; pt_args->ultra_long_reads=ultra_long_reads; pthread_t tid; int ret = pthread_create(&tid, NULL, align_cudb,(void*)(pt_args)); NEG_CHK(ret); return tid; } double align_cudb_async_join(pthread_arg_t *pt_args, pthread_t tid) { int ret = pthread_join(tid, NULL); NEG_CHK(ret); assert(pt_args); double time_cpu = pt_args->ret1; free(pt_args); return time_cpu; } //check if we have run out of space in the pre-allocated gpu arrays static inline int8_t if_gpu_mem_free(core_t* core, db_t* db, int32_t i,int64_t sum_read_len,int64_t sum_n_events){ #ifdef CUDA_DYNAMIC_MALLOC return 1; #else if((sum_read_len+(db->read_len[i] + 1) <= (int64_t)core->cuda->max_sum_read_len) && (sum_n_events+db->et[i].n <= floor(core->cuda->max_sum_read_len * AVG_EVENTS_PER_KMER)) ){ return 1; } else{ return 0; } #endif } //if a suitable candidate to be run on GPU //ultra-long reads as well as the reads with too many average events per base //are done of CPU static inline int8_t if_on_gpu(core_t* core, db_t* db, int32_t i){ if(db->read_len[i]<(core->opt.cuda_max_readlen * db->sum_bases/(float)db->n_bam_rec) && (db->et[i].n)/(float)(db->read_len[i]) < AVG_EVENTS_PER_KMER_GPU_THRESH ){ return 1; } else{ return 0; } } #define GPU_DEC_MAX_LF 0 #define GPU_DEC_AVG_EPK 1 #define GPU_DEC_MAX_EPK 2 #define GPU_INC_AVG_EPK 3 #define GPU_DEC_B 4 #define CPU_INC_MAX_EPK_LF 5 #define CPU_INC_MAX_LF 6 #define CPU_INC_MAX_EPK 7 #define GPU_DEC_MAX_EPK_LF 8 #define GPU_INC_K 9 #define GPU_INC_B 10 static inline void load_balance_advisor(core_t* core, int32_t state){ if(core->previous==state){ core->previous_count++; if(core->previous_count>3){ switch (core->previous) { case GPU_DEC_MAX_LF : INFO("%s","GPU read arrays ran out due to ultra long reads. consider decreasing --cuda-max-lf"); break; case GPU_DEC_AVG_EPK : INFO("%s", "GPU read arrays ran out due to too much being allocated for event arrays. consider decreasing --cuda-avg-epk"); break; case GPU_DEC_MAX_EPK : INFO("%s","GPU event arrays ran out due to over segmented reads. consider decreasing --cuda-max-epk"); break; case GPU_INC_AVG_EPK : INFO("%s","GPU event arrays ran out due to not enough being allocated for events. consider increasing --cuda-avg-epk"); break; case GPU_DEC_B : INFO("%s","GPU arrays ran out. consider reducing --max-bases (-B option)"); break; case CPU_INC_MAX_EPK_LF : INFO("%s", "CPU got too much work. consider increasing --cuda-max-epk or --cuda-max-lf"); break; case CPU_INC_MAX_LF : INFO("%s", "CPU got too much work. consider increasing --cuda-max-lf"); break; case CPU_INC_MAX_EPK : INFO("%s", "CPU got too much work. consider increasing --cuda-max-epk"); break; case GPU_DEC_MAX_EPK_LF : INFO("%s", "GPU got too much work. consider decreasing --cuda-max-epk or --cuda-max-lf"); break; case GPU_INC_K : INFO("%s", "GPU arrays are not fully utilised. consider increasing the --batchsize (-K option)"); break; case GPU_INC_B : INFO("%s", "GPU arrays are not fully utilised. 
consider increasing the --max-bases (-B option)"); break; default : break; } } } else{ core->previous=state; core->previous_count=0; } } void load_balance(core_t *core, db_t *db, double cpu_process_time,double gpu_process_time, int32_t stat_n_gpu_mem_out, int32_t stat_n_too_many_events, int32_t stat_n_ultra_long_reads, float read_array_usage, float event_array_usage){ fprintf(stderr,"[%s] Processing time : CPU %.1f sec, GPU %.1f sec\n",__func__,cpu_process_time,gpu_process_time); double factor = (cpu_process_time-gpu_process_time)/(cpu_process_time+gpu_process_time); if (core->opt.verbosity>1) fprintf(stderr,"[%s] factor %f\n",__func__,factor); float thresh_factor=0.2; float thresh_reads=0.05; float thresh=0.2; if(factor>thresh_factor){ //cpu too much time if (core->opt.verbosity>1) fprintf(stderr,"[%s] CPU too much time\n",__func__); if(stat_n_gpu_mem_out > db->n_bam_rec * thresh_reads){ //gpu run out of memory if (core->opt.verbosity>1) fprintf(stderr,"[%s] Looks like the loaded dataset is too much for the GPU.\n",__func__); //are the GPU arrays balanced? give a warning if(read_array_usage>99 && event_array_usage<100-thresh*100){ //read array full if (core->opt.verbosity>1) fprintf(stderr,"[%s] GPU read array usage too high.\n",__func__); if(stat_n_ultra_long_reads> db->n_bam_rec * thresh_reads){ //array fileld sue to ultra long reads load_balance_advisor(core,GPU_DEC_MAX_LF); if (core->opt.verbosity>1) INFO("%s","GPU read arrays ran out due to ultra long reads. If this message repeats, consider decreasing --cuda-max-lf"); } else{ load_balance_advisor(core,GPU_DEC_AVG_EPK); if (core->opt.verbosity>1) INFO("%s", "GPU read arrays ran out due to too much being allocated for event arrays. If this message repeats, consider decreasing --cuda-avg-epk"); } } else if(event_array_usage>99 && read_array_usage<100-thresh*100){ //event array full if (core->opt.verbosity>1) fprintf(stderr,"[%s] GPU event array usage too high.\n",__func__); if(stat_n_too_many_events > db->n_bam_rec * thresh_reads){//array filled mainly due to reads with too many events load_balance_advisor(core,GPU_DEC_MAX_EPK); if (core->opt.verbosity>1) INFO("%s","GPU event arrays ran out due to over segmented reads. If this message repeats, consider decreasing --cuda-max-epk"); } else{ //array filled AVG_EVENTS_PER_KMER being not enough load_balance_advisor(core,GPU_INC_AVG_EPK); if (core->opt.verbosity>1) INFO("%s","GPU event arrays ran out due to not enough being allocated for events. If this message repeats, consider increasing --cuda-avg-epk"); } } else{//else reduce the batch size load_balance_advisor(core,GPU_DEC_B); if (core->opt.verbosity>1) INFO("%s","GPU arrays ran out. If this message repeats, consider reducing --max-bases (-B option)"); } } else{ //slow CPU? if(stat_n_ultra_long_reads< db->n_bam_rec * thresh_reads && stat_n_too_many_events < db->n_bam_rec * thresh_reads){ load_balance_advisor(core,CPU_INC_MAX_EPK_LF); if (core->opt.verbosity>1) INFO("%s", "CPU got too much work. If this message repeats, consider increasing --cuda-max-epk or --cuda-max-lf"); } else{ if(stat_n_ultra_long_reads< db->n_bam_rec * thresh_reads){ load_balance_advisor(core,CPU_INC_MAX_LF); if (core->opt.verbosity>1) INFO("%s", "CPU got too much work. If this message repeats, consider increasing --cuda-max-lf"); } else if(stat_n_too_many_events < db->n_bam_rec * thresh_reads){ load_balance_advisor(core,CPU_INC_MAX_EPK); if (core->opt.verbosity>1) INFO("%s", "CPU got too much work. 
If this message repeats, consider increasing --cuda-max-epk"); } } } } else if(factor<-thresh_factor){ //gpu too much time load_balance_advisor(core,GPU_DEC_MAX_EPK_LF); if (core->opt.verbosity>1) INFO("%s", "GPU got too much work. If this message repeats, consider decreasing --cuda-max-epk or --cuda-max-lf"); } else{ if(event_array_usage<100-thresh*100 && read_array_usage<100-thresh*100){ if (core->opt.verbosity>1) fprintf(stderr,"[%s] GPU arrays are not fully utilised\n",__func__); if(db->n_bam_rec>=core->opt.batch_size){ load_balance_advisor(core,GPU_INC_K); if (core->opt.verbosity>1) INFO("%s", "GPU arrays are not fully utilised. If this message repeats, consider increasing the --batchsize (-K option)"); } else if(db->sum_bases >= core->opt.batch_size_bases){ load_balance_advisor(core,GPU_INC_B); if (core->opt.verbosity>1) INFO("%s", "GPU arrays are not fully utilised. If this message repeats, consider increasing the --max-bases (-B option)"); } else{ if (core->opt.verbosity>1) fprintf(stderr,"[%s] Probably the last batch\n",__func__); } } else{ if (core->opt.verbosity>1) fprintf(stderr,"[%s] No load balancing required\n",__func__); } } } void align_cuda(core_t* core, db_t* db) { int32_t i,j; int32_t n_bam_rec = db->n_bam_rec; int32_t n_bam_rec_cuda; double realtime1; int32_t n_ultra_long_reads=0; int32_t stat_n_ultra_long_reads=0; //number of ultralong reads processed on CPU int32_t stat_n_too_many_events=0; //number of reads with high avg events per base that are processed on CPU int32_t stat_n_gpu_mem_out=0; //number of reads run on CPU due to the GPU memory running out int32_t sum_bases_cpu=0; //The total sum of bases run on GPU int32_t ultra_long_reads[n_bam_rec]; //not only ultra-long reads, but also ones with large number of average events per base //cpu temp pointers int32_t* read_ptr_host; int32_t* n_events_host; int32_t* event_ptr_host; event_t* event_table_host; AlignedPair* event_align_pairs_host; int32_t* read_len_host; scalings_t* scalings_host; int32_t* n_event_align_pairs_host; char* read_host; /**cuda pointers*/ char* read; //flattened reads sequences int32_t* read_ptr; //index pointer for flattedned "reads" int32_t* read_len; int64_t sum_read_len; int32_t* n_events; event_t* event_table; int32_t* event_ptr; int64_t sum_n_events; scalings_t* scalings; AlignedPair* event_align_pairs; int32_t* n_event_align_pairs; float *bands; uint8_t *trace; EventKmerPair* band_lower_left; model_t* model_kmer_cache; model_t* model; realtime1 = realtime(); read_ptr_host = core->cuda->read_ptr_host; sum_read_len = 0; sum_n_events = 0; //read sequences : needflattening for (i = 0,j=0; i < n_bam_rec; i++) { if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){ read_ptr_host[j] = sum_read_len; sum_read_len += (db->read_len[i] + 1); //with null term sum_n_events += db->et[i].n; j++; } else{ if ((db->et[i].n)/(float)(db->read_len[i]) < AVG_EVENTS_PER_KMER_MAX){ ultra_long_reads[n_ultra_long_reads]=i; n_ultra_long_reads++; sum_bases_cpu += db->read_len[i]; if(db->read_len[i]>=(core->opt.cuda_max_readlen * db->sum_bases/(float)db->n_bam_rec)){ stat_n_ultra_long_reads++; if(core->opt.verbosity>2)STDERR("readlen>=%.0fkbases\t%d",(core->opt.cuda_max_readlen * db->sum_bases/(float)db->n_bam_rec)/1000,db->read_len[i]); } else if ((db->et[i].n)/(float)(db->read_len[i]) >= AVG_EVENTS_PER_KMER_GPU_THRESH){ stat_n_too_many_events++; } else{ stat_n_gpu_mem_out++; } } else{//todo : too many avg events per base, even for the CPU db->n_event_align_pairs[i]=0; } } } 
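/* Summary of the partitioning loop above: a read is flattened for the GPU batch only if it
 * passes both if_on_gpu() (read length below cuda_max_readlen times the mean read length of
 * the batch, and events-per-base below AVG_EVENTS_PER_KMER_GPU_THRESH) and if_gpu_mem_free()
 * (it still fits in the pre-allocated read/event arrays). Everything else is queued in
 * ultra_long_reads[] for the CPU, except reads whose events-per-base is at or above
 * AVG_EVENTS_PER_KMER_MAX, which are skipped entirely (n_event_align_pairs set to 0). */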
n_bam_rec_cuda = j; //can start processing on the ultra long reads on the CPU pthread_arg_t *tmparg=NULL; pthread_t tid = align_cudb_async(&tmparg,core, db, ultra_long_reads, n_ultra_long_reads); double realtime_process_start=realtime(); read_len_host = core->cuda->read_len_host; scalings_host = core->cuda->scalings_host; n_event_align_pairs_host = core->cuda->n_event_align_pairs_host; //form the temporary flattened array on host read_host = (char*)malloc(sizeof(char) * sum_read_len); MALLOC_CHK(read_host); sum_read_len = 0; sum_n_events = 0; for (i = 0,j=0; i < n_bam_rec; i++) { if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){ int32_t idx = read_ptr_host[j]; strcpy(&read_host[idx], db->read[i]); read_len_host[j]=db->read_len[i]; scalings_host[j]=db->scalings[i]; j++; sum_read_len += (db->read_len[i] + 1); //with null term sum_n_events += db->et[i].n; } } //now the events : need flattening //num events : need flattening //get the total size and create the pointers n_events_host = core->cuda->n_events_host; event_ptr_host = core->cuda->event_ptr_host; sum_read_len = 0; sum_n_events = 0; for (i = 0,j=0; i < n_bam_rec; i++) { if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){ n_events_host[j] = db->et[i].n; event_ptr_host[j] = sum_n_events; sum_n_events += db->et[i].n; j++; sum_read_len += (db->read_len[i] + 1); //with null term } } //event table flatten //form the temporary flattened array on host event_table_host = (event_t*)malloc(sizeof(event_t) * sum_n_events); MALLOC_CHK(event_table_host); sum_read_len = 0; sum_n_events = 0; for (i = 0,j=0; i < n_bam_rec; i++) { if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){ int32_t idx = event_ptr_host[j]; memcpy(&event_table_host[idx], db->et[i].event, sizeof(event_t) * db->et[i].n); j++; sum_read_len += (db->read_len[i] + 1); //with null term sum_n_events += db->et[i].n; } } event_align_pairs_host = (AlignedPair*)malloc(2 * sum_n_events * sizeof(AlignedPair)); MALLOC_CHK(event_align_pairs_host); core->align_cuda_preprocess += (realtime() - realtime1); /** Start GPU mallocs**/ realtime1 = realtime(); read_ptr =core->cuda->read_ptr; read_len=core->cuda->read_len; n_events=core->cuda->n_events; event_ptr=core->cuda->event_ptr; scalings=core->cuda->scalings; model = core->cuda->model; n_event_align_pairs=core->cuda->n_event_align_pairs; #ifndef CUDA_DYNAMIC_MALLOC assert(sum_read_len <= core->cuda->max_sum_read_len); assert(sum_n_events <= floor(core->cuda->max_sum_read_len * AVG_EVENTS_PER_KMER)); //fprintf(stderr,"%d %d\n", sum_read_len,sum_n_events); if(core->opt.verbosity>1) STDERR("%.2f %% of GPU read arrays and %.2f %% of GPU event arrays were utilised", sum_read_len/(float)(core->cuda->max_sum_read_len)*100 , sum_n_events/(float)floor((core->cuda->max_sum_read_len)*AVG_EVENTS_PER_KMER)*100); read=(core->cuda->read); event_table=(core->cuda->event_table); model_kmer_cache=(core->cuda->model_kmer_cache); event_align_pairs=(core->cuda->event_align_pairs); bands=(core->cuda->bands); trace=(core->cuda->trace); band_lower_left=(core->cuda->band_lower_left); hipMemset(trace,0,sizeof(uint8_t) * (sum_n_events + sum_read_len) * ALN_BANDWIDTH); //initialise the trace array to 0 CUDA_CHK(); #else if(core->opt.verbosity>1) print_size("read array",sum_read_len * sizeof(char)); hipMalloc((void**)&read, sum_read_len * sizeof(char)); //with null char CUDA_CHK(); if(core->opt.verbosity>1) print_size("event table",sum_n_events * sizeof(event_t)); 
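/* Still inside the CUDA_DYNAMIC_MALLOC path: the device buffers allocated here are sized for
 * the current batch only and are released with hipFree() right after the copy-back, unlike the
 * capacity-sized arrays reused from core->cuda in the branch above. */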
hipMalloc((void**)&event_table, sum_n_events * sizeof(event_t)); CUDA_CHK(); if(core->opt.verbosity>1) print_size("model kmer cache",sum_read_len * sizeof(model_t)); hipMalloc((void**)&model_kmer_cache, sum_read_len * sizeof(model_t)); CUDA_CHK(); /**allocate output arrays for cuda**/ if(core->opt.verbosity>1) print_size("event align pairs",2 * sum_n_events *sizeof(AlignedPair)); hipMalloc((void**)&event_align_pairs, 2 * sum_n_events * sizeof(AlignedPair)); //todo : need better huristic CUDA_CHK(); //scratch arrays size_t sum_n_bands = sum_n_events + sum_read_len; //todo : can be optimised if(core->opt.verbosity>1) print_size("bands",sizeof(float) * sum_n_bands * ALN_BANDWIDTH); hipMalloc((void**)&bands,sizeof(float) * sum_n_bands * ALN_BANDWIDTH); CUDA_CHK(); if(core->opt.verbosity>1) print_size("trace",sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH); hipMalloc((void**)&trace, sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH); CUDA_CHK(); hipMemset(trace,0,sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH); //initialise the trace array to 0 CUDA_CHK(); if(core->opt.verbosity>1) print_size("band_lower_left",sizeof(EventKmerPair)* sum_n_bands); hipMalloc((void**)&band_lower_left, sizeof(EventKmerPair)* sum_n_bands); CUDA_CHK(); #endif core->align_cuda_malloc += (realtime() - realtime1); /* cuda mem copys*/ realtime1 =realtime(); hipMemcpy(read_ptr, read_ptr_host, n_bam_rec_cuda * sizeof(int32_t), hipMemcpyHostToDevice); CUDA_CHK(); hipMemcpy(read, read_host, sum_read_len * sizeof(char), hipMemcpyHostToDevice); CUDA_CHK(); //read length : already linear hence direct copy hipMemcpy(read_len, read_len_host, n_bam_rec_cuda * sizeof(int32_t), hipMemcpyHostToDevice); CUDA_CHK(); hipMemcpy(n_events, n_events_host, n_bam_rec_cuda * sizeof(int32_t), hipMemcpyHostToDevice); CUDA_CHK(); hipMemcpy(event_ptr, event_ptr_host, n_bam_rec_cuda * sizeof(int32_t), hipMemcpyHostToDevice); CUDA_CHK(); hipMemcpy(event_table, event_table_host, sizeof(event_t) * sum_n_events, hipMemcpyHostToDevice); CUDA_CHK(); //can be interleaved hipMemcpy(scalings, scalings_host, sizeof(scalings_t) * n_bam_rec_cuda, hipMemcpyHostToDevice); CUDA_CHK(); core->align_cuda_memcpy += (realtime() - realtime1); realtime1 = realtime(); /*pre kernel*/ assert(BLOCK_LEN_BANDWIDTH>=ALN_BANDWIDTH); dim3 gridpre(1,(n_bam_rec_cuda + BLOCK_LEN_READS - 1) / BLOCK_LEN_READS); dim3 blockpre(BLOCK_LEN_BANDWIDTH,BLOCK_LEN_READS); if(core->opt.verbosity>1) STDERR("grid %d,%d, block %d,%d",gridpre.x,gridpre.y, blockpre.x,blockpre.y); hipLaunchKernelGGL(( align_kernel_pre_2d), dim3(gridpre), dim3(blockpre), 0, 0, read, read_len, read_ptr, n_events, event_ptr, model, n_bam_rec_cuda, model_kmer_cache,bands,trace,band_lower_left); hipDeviceSynchronize();CUDA_CHK(); if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3fsec] align-pre kernel done\n", __func__, realtime() - realtime1); core->align_kernel_time += (realtime() - realtime1); core->align_pre_kernel_time += (realtime() - realtime1); realtime1 = realtime(); /* core kernel*/ assert(BLOCK_LEN_BANDWIDTH>=ALN_BANDWIDTH); dim3 grid1(1,(n_bam_rec_cuda + BLOCK_LEN_READS - 1) / BLOCK_LEN_READS); dim3 block1(BLOCK_LEN_BANDWIDTH,BLOCK_LEN_READS); hipLaunchKernelGGL(( align_kernel_core_2d_shm), dim3(grid1), dim3(block1), 0, 0, read_len, read_ptr, event_table, n_events, event_ptr, scalings, n_bam_rec_cuda, model_kmer_cache,bands,trace,band_lower_left ); hipDeviceSynchronize();CUDA_CHK(); if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3fsec] align-core kernel done\n", __func__, realtime() - realtime1); 
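/* The third kernel, align_kernel_post (launched after the timing bookkeeping below), takes the
 * filled bands/trace/band_lower_left state and writes the per-read event-to-k-mer AlignedPair
 * list, with the pair count going to n_event_align_pairs. When WARP_HACK is defined the grid is
 * divided by BLOCK_LEN/32, i.e. each read is handled by a 32-thread warp rather than a single
 * thread. */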
core->align_kernel_time += (realtime() - realtime1); core->align_core_kernel_time += (realtime() - realtime1); realtime1 = realtime(); /*post kernel*/ int32_t BLOCK_LEN = core->opt.cuda_block_size; dim3 gridpost((n_bam_rec_cuda + BLOCK_LEN - 1) / BLOCK_LEN); dim3 blockpost(BLOCK_LEN); #ifndef WARP_HACK hipLaunchKernelGGL(( align_kernel_post), dim3(gridpost), dim3(blockpost), 0, 0, event_align_pairs, n_event_align_pairs, read_len, read_ptr, event_table, n_events, event_ptr,scalings, n_bam_rec_cuda, model_kmer_cache,bands,trace,band_lower_left ); #else assert(BLOCK_LEN>=32); dim3 grid1post((n_bam_rec_cuda + (BLOCK_LEN/32) - 1) / (BLOCK_LEN/32)); if(core->opt.verbosity>1) STDERR("grid new %d",grid1post.x); hipLaunchKernelGGL(( align_kernel_post), dim3(grid1post), dim3(blockpost), 0, 0, event_align_pairs, n_event_align_pairs, read_len, read_ptr, event_table, n_events, event_ptr, scalings, n_bam_rec_cuda, model_kmer_cache,bands,trace,band_lower_left ); #endif hipDeviceSynchronize();CUDA_CHK(); if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3fsec] align-post kernel done\n", __func__, realtime() - realtime1); core->align_kernel_time += (realtime() - realtime1); core->align_post_kernel_time += (realtime() - realtime1); //fprintf(stderr,"readlen %d,n_events %d\n",db->read_len[i],n_event_align_pairs); #ifdef CUDA_DEBUG hipDeviceSynchronize(); CUDA_CHK(); #endif /** copyback ans**/ realtime1 = realtime(); hipMemcpy(n_event_align_pairs_host, n_event_align_pairs, n_bam_rec_cuda * sizeof(int32_t), hipMemcpyDeviceToHost); CUDA_CHK(); hipMemcpy(event_align_pairs_host, event_align_pairs, 2 * sum_n_events * sizeof(AlignedPair), hipMemcpyDeviceToHost); CUDA_CHK(); core->align_cuda_memcpy += (realtime() - realtime1); realtime1 = realtime(); #ifdef CUDA_DYNAMIC_MALLOC hipFree(read); //with null char hipFree(event_table); hipFree(event_align_pairs); hipFree(bands); hipFree(trace); hipFree(band_lower_left); hipFree(model_kmer_cache); #endif core->align_cuda_malloc += (realtime() - realtime1); /** post work**/ realtime1 = realtime(); //copy back sum_read_len = 0; sum_n_events = 0; for (i = 0,j=0; i < n_bam_rec; i++) { if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){ int32_t idx = event_ptr_host[j]; db->n_event_align_pairs[i]=n_event_align_pairs_host[j]; #ifdef REVERSAL_ON_CPU int c; int end = db->n_event_align_pairs[i] - 1; AlignedPair* out_2= db->event_align_pairs[i]; AlignedPair* in_2= &event_align_pairs_host[idx * 2]; for (c = 0; c < db->n_event_align_pairs[i] ; c++) { out_2[c].ref_pos = in_2[end].ref_pos; out_2[c].read_pos = in_2[end].read_pos; end--; } #else memcpy(db->event_align_pairs[i], &event_align_pairs_host[idx * 2], sizeof(AlignedPair) * db->n_event_align_pairs[i]); #endif j++; sum_read_len += (db->read_len[i] + 1); //with null term sum_n_events += db->et[i].n; } } //free the temp arrays on host free(read_host); free(event_table_host); free(event_align_pairs_host); core->align_cuda_postprocess += (realtime() - realtime1); double gpu_process_time = realtime()-realtime_process_start; realtime1 = realtime(); double cpu_process_time = align_cudb_async_join(tmparg,tid); core->extra_load_cpu += (realtime() - realtime1); if(core->opt.verbosity>1) { fprintf(stderr, "[%s::%.3fsec] CPU extra processing done (>=%.0fkbases:%d|>=%.1fevents:%d|gpu_mem_out:%d)\n", __func__,realtime() - realtime1,((core->opt.cuda_max_readlen * db->sum_bases/(float)db->n_bam_rec))/1000, stat_n_ultra_long_reads, AVG_EVENTS_PER_KMER_GPU_THRESH,stat_n_too_many_events, stat_n_gpu_mem_out); } 
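/* load_balance() below compares the two wall-clock times with a normalised factor:
 *     factor = (cpu_process_time - gpu_process_time) / (cpu_process_time + gpu_process_time)
 * For example, CPU 30 s vs GPU 20 s gives 10/50 = 0.2, right at the 0.2 threshold beyond which
 * the CPU is considered to have received too much work; a factor below -0.2 means the GPU side
 * took too long. The array-usage percentages passed in decide which tuning hint
 * (--cuda-max-lf, --cuda-avg-epk, --cuda-max-epk, -K, -B) gets suggested. */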
STDERR("Load : CPU %d entries (%.1fM bases), GPU %d entries (%.1fM bases)", n_bam_rec-n_bam_rec_cuda, (float)sum_bases_cpu/(1000*1000),n_bam_rec_cuda, (float)sum_read_len/(1000*1000)); load_balance(core,db,cpu_process_time,gpu_process_time,stat_n_gpu_mem_out,stat_n_too_many_events, stat_n_ultra_long_reads, sum_read_len/(float)(core->cuda->max_sum_read_len)*100 , sum_n_events/(float)floor((core->cuda->max_sum_read_len)*AVG_EVENTS_PER_KMER)*100); } #endif
ef3cce1dec2ec53360f88720887700e35d1f4b10.cu
#include <errno.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include "error.h" #include "f5c.h" #include "f5cmisc.cuh" #include "f5cmisc.h" void init_cuda(core_t* core){ cuda_exists(); int32_t cuda_device_num = core->opt.cuda_dev_id; cudaDeviceProp prop; cudaGetDeviceProperties(&prop, cuda_device_num); CUDA_CHK(); STDERR("Running on %s (device id %d)",prop.name, cuda_device_num); //fprintf(stderr,"AVG_EVENTS_PER_KMER %f\n",AVG_EVENTS_PER_KMER); //fprintf(stderr,"AVG_EVENTS_PER_KMER %f\n",AVG_EVENTS_PER_KMER_GPU_THRESH); //fprintf(stderr,"readfac %f\n",core->opt.cuda_max_readlen); assert(AVG_EVENTS_PER_KMER>0 && AVG_EVENTS_PER_KMER>0); core->cuda = (cuda_data_t*)malloc(sizeof(cuda_data_t)); MALLOC_CHK(core->cuda); core->align_kernel_time=0; core->align_pre_kernel_time=0; core->align_core_kernel_time=0; core->align_post_kernel_time=0; core->align_cuda_malloc=0; core->extra_load_cpu=0; core->align_cuda_memcpy=0; core->align_cuda_postprocess=0; core->align_cuda_preprocess=0; core->previous = -1; core->previous_count = 0; #ifdef CUDA_PRE_MALLOC int32_t n_bam_rec = core->opt.batch_size; //cpu arrays core->cuda->read_ptr_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(core->cuda->read_ptr_host); core->cuda->n_events_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(core->cuda->n_events_host); core->cuda->event_ptr_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(core->cuda->event_ptr_host); core->cuda->read_len_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(core->cuda->read_len_host); core->cuda->scalings_host = (scalings_t*)malloc(sizeof(scalings_t) * n_bam_rec); MALLOC_CHK(core->cuda->scalings_host); core->cuda->n_event_align_pairs_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(core->cuda->n_event_align_pairs_host); //cuda arrays if(core->opt.verbosity>1) print_size("read_ptr array",n_bam_rec * sizeof(int32_t)); cudaMalloc((void**)&(core->cuda->read_ptr), n_bam_rec * sizeof(int32_t)); CUDA_CHK(); if(core->opt.verbosity>1) print_size("read_lens",n_bam_rec * sizeof(int32_t)); cudaMalloc((void**)&(core->cuda->read_len), n_bam_rec * sizeof(int32_t)); CUDA_CHK(); //n_events if(core->opt.verbosity>1) print_size("n_events",n_bam_rec * sizeof(int32_t)); cudaMalloc((void**)&(core->cuda->n_events), n_bam_rec * sizeof(int32_t)); CUDA_CHK(); //event ptr if(core->opt.verbosity>1) print_size("event ptr",n_bam_rec * sizeof(int32_t)); cudaMalloc((void**)&(core->cuda->event_ptr), n_bam_rec * sizeof(int32_t)); CUDA_CHK(); //scalings : already linear if(core->opt.verbosity>1) print_size("Scalings",n_bam_rec * sizeof(scalings_t)); cudaMalloc((void**)&(core->cuda->scalings), n_bam_rec * sizeof(scalings_t)); CUDA_CHK(); cudaMalloc((void**)&(core->cuda->model), NUM_KMER * sizeof(model_t)); CUDA_CHK(); if(core->opt.verbosity>1) print_size("n_event_align_pairs",n_bam_rec * sizeof(int32_t)); cudaMalloc((void**)&(core->cuda->n_event_align_pairs), n_bam_rec * sizeof(int32_t)); CUDA_CHK(); //model : already linear //move to cuda_init cudaMemcpy(core->cuda->model, core->model, NUM_KMER * sizeof(model_t), cudaMemcpyHostToDevice); CUDA_CHK(); #ifndef CUDA_DYNAMIC_MALLOC // //dynamic arrays //compute the maximum uint64_t free_mem = 0; if(prop.integrated==1){ //in tegra free mem should be sought differently free_mem=tegra_freemem(cuda_device_num); } else{ free_mem=cuda_freemem(cuda_device_num); } double factor = 1 * sizeof(char) + //read_capacity AVG_EVENTS_PER_KMER * sizeof(event_t) + 
//event_table_capacity 1 * sizeof(model_t) + //model_kmer_cache_capacity (AVG_EVENTS_PER_KMER * 2) * sizeof(AlignedPair) + //event_align_pairs_capacity (AVG_EVENTS_PER_KMER + 1) * ALN_BANDWIDTH * sizeof(float) + //bands_capacity (AVG_EVENTS_PER_KMER + 1) * ALN_BANDWIDTH * sizeof(uint8_t) + //trace_capacity (AVG_EVENTS_PER_KMER + 1) * sizeof(EventKmerPair) ; //band_lower_left_capacity uint64_t sum_read_len = 0; //if unset by user (or set to weird values by user) if(core->opt.cuda_mem_frac>=1.0f || core->opt.cuda_mem_frac<=0.0f){ if(prop.integrated==1){ //for tegra we have to reserve some space for RAM sum_read_len= floor(free_mem*TEGRA_MEM_FACTOR/factor); } else{ sum_read_len= floor(free_mem*MEM_FACTOR/factor); } } else{ sum_read_len= floor(free_mem*(core->opt.cuda_mem_frac)/factor); } core->cuda->max_sum_read_len = sum_read_len; uint64_t sum_n_events = floor(sum_read_len * AVG_EVENTS_PER_KMER); uint64_t read_capacity = sum_read_len * sizeof(char); uint64_t event_table_capacity = sum_n_events * sizeof(event_t); uint64_t model_kmer_cache_capacity= sum_read_len * sizeof(model_t); uint64_t event_align_pairs_capacity= sum_n_events * 2 * sizeof(AlignedPair); uint64_t bands_capacity = (sum_n_events + sum_read_len) * ALN_BANDWIDTH * sizeof(float) ; uint64_t trace_capacity = (sum_n_events + sum_read_len) * ALN_BANDWIDTH * sizeof(uint8_t) ; uint64_t band_lower_left_capacity = (sum_n_events + sum_read_len) * sizeof(EventKmerPair); assert(read_capacity + event_table_capacity + model_kmer_cache_capacity + event_align_pairs_capacity + bands_capacity + trace_capacity + band_lower_left_capacity <= free_mem); if(core->opt.verbosity>1) print_size("read_capacity",read_capacity); if(core->opt.verbosity>1) print_size("event_table_capacity",event_table_capacity); if(core->opt.verbosity>1) print_size("model_kmer_cache_capacity",model_kmer_cache_capacity); if(core->opt.verbosity>1) print_size("event_align_pairs_capacity",event_align_pairs_capacity); if(core->opt.verbosity>1) print_size("bands_capacity",bands_capacity); if(core->opt.verbosity>1) print_size("trace_capacity",trace_capacity); if(core->opt.verbosity>1) print_size("band_lower_left_capacity",band_lower_left_capacity); //input arrays cudaMalloc((void**)&(core->cuda->read), read_capacity); //with null char CUDA_CHK(); cudaMalloc((void**)&(core->cuda->event_table), event_table_capacity); CUDA_CHK(); cudaMalloc((void**)&(core->cuda->model_kmer_cache), model_kmer_cache_capacity); CUDA_CHK(); /**allocate output arrays for cuda**/ cudaMalloc((void**)&(core->cuda->event_align_pairs),event_align_pairs_capacity); //todo : need better huristic CUDA_CHK(); //scratch arrays cudaMalloc((void**)&(core->cuda->bands), bands_capacity); CUDA_CHK(); cudaMalloc((void**)&(core->cuda->trace), trace_capacity); CUDA_CHK(); cudaMalloc((void**)&(core->cuda->band_lower_left), band_lower_left_capacity); CUDA_CHK(); STDERR("Max GPU capacity %.1fM bases",core->cuda->max_sum_read_len/(1000.0*1000.0)); #endif #endif return; } void free_cuda(core_t* core){ #ifdef CUDA_PRE_MALLOC free(core->cuda->event_ptr_host); free(core->cuda->n_events_host); free(core->cuda->read_ptr_host); free(core->cuda->read_len_host); free(core->cuda->scalings_host); free(core->cuda->n_event_align_pairs_host); cudaFree(core->cuda->read_ptr); cudaFree(core->cuda->read_len); cudaFree(core->cuda->n_events); cudaFree(core->cuda->event_ptr); cudaFree(core->cuda->model); //constant memory cudaFree(core->cuda->scalings); cudaFree(core->cuda->n_event_align_pairs); #ifndef CUDA_DYNAMIC_MALLOC cudaFree(core->cuda->read); 
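/* The buffers freed in this #ifndef CUDA_DYNAMIC_MALLOC block are the capacity-sized arrays
 * allocated once in init_cuda() from the estimated free GPU memory; they persist for the whole
 * run and are only released here. */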
cudaFree(core->cuda->event_table); cudaFree(core->cuda->model_kmer_cache); cudaFree(core->cuda->event_align_pairs); cudaFree(core->cuda->bands); cudaFree(core->cuda->trace); cudaFree(core->cuda->band_lower_left); #endif #endif free(core->cuda); return; } #ifndef CPU_GPU_PROC void align_cuda(core_t* core, db_t* db) { int32_t i; int32_t n_bam_rec = db->n_bam_rec; double realtime1; /**cuda pointers*/ char* read; //flattened reads sequences int32_t* read_ptr; //index pointer for flattedned "reads" int32_t* read_len; int64_t sum_read_len; int32_t* n_events; event_t* event_table; int32_t* event_ptr; int64_t sum_n_events; scalings_t* scalings; AlignedPair* event_align_pairs; int32_t* n_event_align_pairs; float *bands; uint8_t *trace; EventKmerPair* band_lower_left; realtime1 = realtime(); #ifdef CUDA_PRE_MALLOC int32_t* read_ptr_host = core->cuda->read_ptr_host; #else //get the total size and create the pointers int32_t* read_ptr_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(read_ptr_host); #endif sum_read_len = 0; //read sequences : needflattening for (i = 0; i < n_bam_rec; i++) { read_ptr_host[i] = sum_read_len; sum_read_len += (db->read_len[i] + 1); //with null term } //form the temporary flattened array on host char* read_host = (char*)malloc(sizeof(char) * sum_read_len); MALLOC_CHK(read_host); for (i = 0; i < n_bam_rec; i++) { int32_t idx = read_ptr_host[i]; strcpy(&read_host[idx], db->read[i]); } //now the events : need flattening //num events : need flattening //get the total size and create the pointers #ifdef CUDA_PRE_MALLOC int32_t* n_events_host = core->cuda->n_events_host; int32_t* event_ptr_host = core->cuda->event_ptr_host; #else int32_t* n_events_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(n_events_host); int32_t* event_ptr_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec); MALLOC_CHK(event_ptr_host); #endif sum_n_events = 0; for (i = 0; i < n_bam_rec; i++) { n_events_host[i] = db->et[i].n; event_ptr_host[i] = sum_n_events; sum_n_events += db->et[i].n; } //event table flatten //form the temporary flattened array on host event_t* event_table_host = (event_t*)malloc(sizeof(event_t) * sum_n_events); MALLOC_CHK(event_table_host); for (i = 0; i < n_bam_rec; i++) { int32_t idx = event_ptr_host[i]; memcpy(&event_table_host[idx], db->et[i].event, sizeof(event_t) * db->et[i].n); } AlignedPair* event_align_pairs_host = (AlignedPair*)malloc(2 * sum_n_events * sizeof(AlignedPair)); MALLOC_CHK(event_align_pairs_host); core->align_cuda_preprocess += (realtime() - realtime1); /** Start GPU mallocs**/ realtime1 = realtime(); #ifdef CUDA_PRE_MALLOC read_ptr =core->cuda->read_ptr; read_len=core->cuda->read_len; n_events=core->cuda->n_events; event_ptr=core->cuda->event_ptr; scalings=core->cuda->scalings; model_t* model = core->cuda->model; #else if(core->opt.verbosity>1) print_size("read_ptr array",n_bam_rec * sizeof(int32_t)); cudaMalloc((void**)&read_ptr, n_bam_rec * sizeof(int32_t)); CUDA_CHK(); if(core->opt.verbosity>1) print_size("read_lens",n_bam_rec * sizeof(int32_t)); cudaMalloc((void**)&read_len, n_bam_rec * sizeof(int32_t)); CUDA_CHK(); //n_events if(core->opt.verbosity>1) print_size("n_events",n_bam_rec * sizeof(int32_t)); cudaMalloc((void**)&n_events, n_bam_rec * sizeof(int32_t)); CUDA_CHK(); //event ptr if(core->opt.verbosity>1) print_size("event ptr",n_bam_rec * sizeof(int32_t)); cudaMalloc((void**)&event_ptr, n_bam_rec * sizeof(int32_t)); CUDA_CHK(); //scalings : already linear if(core->opt.verbosity>1) print_size("Scalings",n_bam_rec * 
sizeof(scalings_t)); cudaMalloc((void**)&scalings, n_bam_rec * sizeof(scalings_t)); CUDA_CHK(); //model : already linear model_t* model; cudaMalloc((void**)&model, NUM_KMER * sizeof(model_t)); CUDA_CHK(); #endif if(core->opt.verbosity>1) print_size("read array",sum_read_len * sizeof(char)); cudaMalloc((void**)&read, sum_read_len * sizeof(char)); //with null char CUDA_CHK(); if(core->opt.verbosity>1) print_size("event table",sum_n_events * sizeof(event_t)); cudaMalloc((void**)&event_table, sum_n_events * sizeof(event_t)); CUDA_CHK(); model_t* model_kmer_cache; cudaMalloc((void**)&model_kmer_cache, sum_read_len * sizeof(model_t)); CUDA_CHK(); /**allocate output arrays for cuda**/ if(core->opt.verbosity>1) print_size("event align pairs",2 * sum_n_events *sizeof(AlignedPair)); cudaMalloc((void**)&event_align_pairs, 2 * sum_n_events * sizeof(AlignedPair)); //todo : need better huristic CUDA_CHK(); #ifdef CUDA_PRE_MALLOC n_event_align_pairs=core->cuda->n_event_align_pairs; #else if(core->opt.verbosity>1) print_size("n_event_align_pairs",n_bam_rec * sizeof(int32_t)); cudaMalloc((void**)&n_event_align_pairs, n_bam_rec * sizeof(int32_t)); CUDA_CHK(); #endif //scratch arrays size_t sum_n_bands = sum_n_events + sum_read_len; //todo : can be optimised if(core->opt.verbosity>1) print_size("bands",sizeof(float) * sum_n_bands * ALN_BANDWIDTH); cudaMalloc((void**)&bands,sizeof(float) * sum_n_bands * ALN_BANDWIDTH); CUDA_CHK(); if(core->opt.verbosity>1) print_size("trace",sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH); cudaMalloc((void**)&trace, sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH); CUDA_CHK(); cudaMemset(trace,0,sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH); //initialise the trace array to 0 if(core->opt.verbosity>1) print_size("band_lower_left",sizeof(EventKmerPair)* sum_n_bands); cudaMalloc((void**)&band_lower_left, sizeof(EventKmerPair)* sum_n_bands); CUDA_CHK(); core->align_cuda_malloc += (realtime() - realtime1); /* cuda mem copys*/ realtime1 =realtime(); cudaMemcpy(read_ptr, read_ptr_host, n_bam_rec * sizeof(int32_t), cudaMemcpyHostToDevice); CUDA_CHK(); cudaMemcpy(read, read_host, sum_read_len * sizeof(char), cudaMemcpyHostToDevice); CUDA_CHK(); //read length : already linear hence direct copy cudaMemcpy(read_len, db->read_len, n_bam_rec * sizeof(int32_t), cudaMemcpyHostToDevice); CUDA_CHK(); cudaMemcpy(n_events, n_events_host, n_bam_rec * sizeof(int32_t), cudaMemcpyHostToDevice); CUDA_CHK(); cudaMemcpy(event_ptr, event_ptr_host, n_bam_rec * sizeof(int32_t), cudaMemcpyHostToDevice); CUDA_CHK(); cudaMemcpy(event_table, event_table_host, sizeof(event_t) * sum_n_events, cudaMemcpyHostToDevice); CUDA_CHK(); #ifndef CUDA_PRE_MALLOC //model : already linear //move to cuda_init cudaMemcpy(model, core->model, NUM_KMER * sizeof(model_t), cudaMemcpyHostToDevice); CUDA_CHK(); #endif //can be interleaved cudaMemcpy(scalings, db->scalings, sizeof(scalings_t) * n_bam_rec, cudaMemcpyHostToDevice); CUDA_CHK(); core->align_cuda_memcpy += (realtime() - realtime1); realtime1 = realtime(); /*pre kernel*/ assert(BLOCK_LEN_BANDWIDTH>=ALN_BANDWIDTH); dim3 gridpre(1,(db->n_bam_rec + BLOCK_LEN_READS - 1) / BLOCK_LEN_READS); dim3 blockpre(BLOCK_LEN_BANDWIDTH,BLOCK_LEN_READS); if(core->opt.verbosity>1) fprintf(stderr,"grid %d,%d, block %d,%d\n",gridpre.x,gridpre.y, blockpre.x,blockpre.y); align_kernel_pre_2d<<<gridpre, blockpre>>>( read, read_len, read_ptr, n_events, event_ptr, model, n_bam_rec, model_kmer_cache,bands,trace,band_lower_left); cudaDeviceSynchronize();CUDA_CHK(); if(core->opt.verbosity>1) 
fprintf(stderr, "[%s::%.3f*%.2f] align-pre kernel done\n", __func__, realtime() - realtime1, cputime() / (realtime() - realtime1)); core->align_kernel_time += (realtime() - realtime1); core->align_pre_kernel_time += (realtime() - realtime1); realtime1 = realtime(); /* core kernel*/ assert(BLOCK_LEN_BANDWIDTH>=ALN_BANDWIDTH); dim3 grid1(1,(db->n_bam_rec + BLOCK_LEN_READS - 1) / BLOCK_LEN_READS); dim3 block1(BLOCK_LEN_BANDWIDTH,BLOCK_LEN_READS); align_kernel_core_2d_shm<<<grid1, block1>>>(read_len, read_ptr, event_table, n_events, event_ptr, scalings, n_bam_rec, model_kmer_cache,bands,trace,band_lower_left ); cudaDeviceSynchronize();CUDA_CHK(); if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3f*%.2f] align-core kernel done\n", __func__, realtime() - realtime1, cputime() / (realtime() - realtime1)); core->align_kernel_time += (realtime() - realtime1); core->align_core_kernel_time += (realtime() - realtime1); realtime1 = realtime(); /*post kernel*/ int32_t BLOCK_LEN = core->opt.cuda_block_size; dim3 gridpost((db->n_bam_rec + BLOCK_LEN - 1) / BLOCK_LEN); dim3 blockpost(BLOCK_LEN); #ifndef WARP_HACK align_kernel_post<<<gridpost, blockpost>>>(event_align_pairs, n_event_align_pairs, read_len, read_ptr, event_table, n_events, event_ptr,scalings, n_bam_rec, model_kmer_cache,bands,trace,band_lower_left ); #else assert(BLOCK_LEN>=32); dim3 grid1post((db->n_bam_rec + (BLOCK_LEN/32) - 1) / (BLOCK_LEN/32)); if(core->opt.verbosity>1) fprintf(stderr,"grid new %d\n",grid1post.x); align_kernel_post<<<grid1post, blockpost>>>(event_align_pairs, n_event_align_pairs, read_len, read_ptr, event_table, n_events, event_ptr, scalings, n_bam_rec, model_kmer_cache,bands,trace,band_lower_left ); #endif cudaDeviceSynchronize();CUDA_CHK(); if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3f*%.2f] align-post kernel done\n", __func__, realtime() - realtime1, cputime() / (realtime() - realtime1)); core->align_kernel_time += (realtime() - realtime1); core->align_post_kernel_time += (realtime() - realtime1); //fprintf(stderr,"readlen %d,n_events %d\n",db->read_len[i],n_event_align_pairs); #ifdef CUDA_DEBUG cudaDeviceSynchronize(); CUDA_CHK(); #endif /** copyback ans**/ realtime1 = realtime(); cudaMemcpy(db->n_event_align_pairs, n_event_align_pairs, n_bam_rec * sizeof(int32_t), cudaMemcpyDeviceToHost); CUDA_CHK(); cudaMemcpy(event_align_pairs_host, event_align_pairs, 2 * sum_n_events * sizeof(AlignedPair), cudaMemcpyDeviceToHost); CUDA_CHK(); core->align_cuda_memcpy += (realtime() - realtime1); realtime1 = realtime(); #ifndef CUDA_PRE_MALLOC cudaFree(read_ptr); cudaFree(read_len); cudaFree(n_events); cudaFree(event_ptr); cudaFree(model); //constant memory cudaFree(scalings); cudaFree(n_event_align_pairs); #endif cudaFree(read); //with null char cudaFree(event_table); cudaFree(event_align_pairs); cudaFree(bands); cudaFree(trace); cudaFree(band_lower_left); cudaFree(model_kmer_cache); core->align_cuda_malloc += (realtime() - realtime1); /** post work**/ realtime1 = realtime(); //copy back for (i = 0; i < n_bam_rec; i++) { int32_t idx = event_ptr_host[i]; memcpy(db->event_align_pairs[i], &event_align_pairs_host[idx * 2], sizeof(AlignedPair) * db->n_event_align_pairs[i]); } //free the temp arrays on host #ifndef CUDA_PRE_MALLOC free(read_ptr_host); free(n_events_host); free(event_ptr_host); #endif free(read_host); free(event_table_host); free(event_align_pairs_host); core->align_cuda_postprocess += (realtime() - realtime1); } #else #ifdef WORK_STEAL static inline int32_t steal_work(pthread_arg_t* all_args, int32_t n_threads) { 
int32_t i, c_i = -1; int32_t k; for (i = 0; i < n_threads; ++i){ pthread_arg_t args = all_args[i]; //fprintf(stderr,"endi : %d, starti : %d\n",args.endi,args.starti); if (args.endi-args.starti > STEAL_THRESH_CUDA) { //fprintf(stderr,"gap : %d\n",args.endi-args.starti); c_i = i; break; } } if(c_i<0){ return -1; } k = __sync_fetch_and_add(&(all_args[c_i].starti), 1); //fprintf(stderr,"k : %d, end %d, start %d\n",k,all_args[c_i].endi,all_args[c_i].starti); return k >= all_args[c_i].endi ? -1 : k; } #endif void* pthread_cusingle(void* voidargs) { int32_t i,j; pthread_arg_t* args = (pthread_arg_t*)voidargs; db_t* db = args->db; core_t* core = args->core; #ifndef WORK_STEAL for (i = args->starti; i < args->endi; i++) { j=args->ultra_long_reads[i]; args->func(core,db,j); } #else pthread_arg_t* all_args = (pthread_arg_t*)(args->all_pthread_args); //adapted from ktherad for (;;) { i = __sync_fetch_and_add(&args->starti, 1); if (i >= args->endi) { break; } j=args->ultra_long_reads[i]; args->func(core,db,j); } while ((i = steal_work(all_args,core->opt.num_thread)) >= 0){ j=args->ultra_long_reads[i]; args->func(core,db,j); } #endif //fprintf(stderr,"Thread %d done\n",(myargs->position)/THREADS); pthread_exit(0); } void pthread_cudb(core_t* core, db_t* db, int32_t* ultra_long_reads, int32_t n_ultra_long_reads,void (*func)(core_t*,db_t*,int)){ //create threads pthread_t tids[core->opt.num_thread]; pthread_arg_t pt_args[core->opt.num_thread]; int32_t t, ret; int32_t i = 0; int32_t num_thread = core->opt.num_thread; int32_t step = (n_ultra_long_reads + num_thread - 1) / num_thread; //todo : check for higher num of threads than the data //current works but many threads are created despite //set the data structures for (t = 0; t < num_thread; t++) { pt_args[t].core = core; pt_args[t].db = db; pt_args[t].starti = i; i += step; if (i > n_ultra_long_reads) { pt_args[t].endi = n_ultra_long_reads; } else { pt_args[t].endi = i; } pt_args[t].func=func; pt_args[t].ultra_long_reads=ultra_long_reads; #ifdef WORK_STEAL pt_args[t].all_pthread_args = (void *)pt_args; #endif //fprintf(stderr,"t%d : %d-%d\n",t,pt_args[t].starti,pt_args[t].endi); } //create threads for(t = 0; t < core->opt.num_thread; t++){ ret = pthread_create(&tids[t], NULL, pthread_cusingle, (void*)(&pt_args[t])); NEG_CHK(ret); } //pthread joining for (t = 0; t < core->opt.num_thread; t++) { int ret = pthread_join(tids[t], NULL); NEG_CHK(ret); } } void* align_cudb(void* voidargs){ double realtime1 = realtime(); pthread_arg_t* args = (pthread_arg_t*)voidargs; db_t* db = args->db; core_t* core = args->core; int32_t* ultra_long_reads = args->ultra_long_reads; int32_t n_ultra_long_reads = args->endi; //fprintf(stderr,"ultra long guys : %d\n",n_ultra_long_reads); //fprintf(stderr, "cpu\n"); if (core->opt.num_thread == 1) { int j; for(j=0;j<n_ultra_long_reads;j++) { int32_t i = ultra_long_reads[j]; align_single(core, db, i); // db->n_event_align_pairs[i] = // align(db->event_align_pairs[i], db->read[i], // db->read_len[i], db->et[i], core->model, // db->scalings[i], db->f5[i]->sample_rate); //fprintf(stderr,"readlen %d,n_events %d\n",db->read_len[i],n_event_align_pairs); } } else { pthread_cudb(core, db, ultra_long_reads,n_ultra_long_reads,align_single); } args->ret1 = realtime() - realtime1; if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3fsec] %d reads processed on cpu\n", __func__, realtime() - realtime1, n_ultra_long_reads); return NULL; } pthread_t align_cudb_async(pthread_arg_t **pt_args_ptr,core_t* core, db_t* db, int32_t* ultra_long_reads, int32_t 
n_ultra_long_reads) { assert(*pt_args_ptr==NULL); *pt_args_ptr = (pthread_arg_t *)malloc(sizeof(pthread_arg_t)); pthread_arg_t *pt_args=*pt_args_ptr; MALLOC_CHK(pt_args); pt_args->core = core; pt_args->db = db; pt_args->starti = 0; pt_args->endi = n_ultra_long_reads; pt_args->ultra_long_reads=ultra_long_reads; pthread_t tid; int ret = pthread_create(&tid, NULL, align_cudb,(void*)(pt_args)); NEG_CHK(ret); return tid; } double align_cudb_async_join(pthread_arg_t *pt_args, pthread_t tid) { int ret = pthread_join(tid, NULL); NEG_CHK(ret); assert(pt_args); double time_cpu = pt_args->ret1; free(pt_args); return time_cpu; } //check if we have run out of space in the pre-allocated gpu arrays static inline int8_t if_gpu_mem_free(core_t* core, db_t* db, int32_t i,int64_t sum_read_len,int64_t sum_n_events){ #ifdef CUDA_DYNAMIC_MALLOC return 1; #else if((sum_read_len+(db->read_len[i] + 1) <= (int64_t)core->cuda->max_sum_read_len) && (sum_n_events+db->et[i].n <= floor(core->cuda->max_sum_read_len * AVG_EVENTS_PER_KMER)) ){ return 1; } else{ return 0; } #endif } //if a suitable candidate to be run on GPU //ultra-long reads as well as the reads with too many average events per base //are done of CPU static inline int8_t if_on_gpu(core_t* core, db_t* db, int32_t i){ if(db->read_len[i]<(core->opt.cuda_max_readlen * db->sum_bases/(float)db->n_bam_rec) && (db->et[i].n)/(float)(db->read_len[i]) < AVG_EVENTS_PER_KMER_GPU_THRESH ){ return 1; } else{ return 0; } } #define GPU_DEC_MAX_LF 0 #define GPU_DEC_AVG_EPK 1 #define GPU_DEC_MAX_EPK 2 #define GPU_INC_AVG_EPK 3 #define GPU_DEC_B 4 #define CPU_INC_MAX_EPK_LF 5 #define CPU_INC_MAX_LF 6 #define CPU_INC_MAX_EPK 7 #define GPU_DEC_MAX_EPK_LF 8 #define GPU_INC_K 9 #define GPU_INC_B 10 static inline void load_balance_advisor(core_t* core, int32_t state){ if(core->previous==state){ core->previous_count++; if(core->previous_count>3){ switch (core->previous) { case GPU_DEC_MAX_LF : INFO("%s","GPU read arrays ran out due to ultra long reads. consider decreasing --cuda-max-lf"); break; case GPU_DEC_AVG_EPK : INFO("%s", "GPU read arrays ran out due to too much being allocated for event arrays. consider decreasing --cuda-avg-epk"); break; case GPU_DEC_MAX_EPK : INFO("%s","GPU event arrays ran out due to over segmented reads. consider decreasing --cuda-max-epk"); break; case GPU_INC_AVG_EPK : INFO("%s","GPU event arrays ran out due to not enough being allocated for events. consider increasing --cuda-avg-epk"); break; case GPU_DEC_B : INFO("%s","GPU arrays ran out. consider reducing --max-bases (-B option)"); break; case CPU_INC_MAX_EPK_LF : INFO("%s", "CPU got too much work. consider increasing --cuda-max-epk or --cuda-max-lf"); break; case CPU_INC_MAX_LF : INFO("%s", "CPU got too much work. consider increasing --cuda-max-lf"); break; case CPU_INC_MAX_EPK : INFO("%s", "CPU got too much work. consider increasing --cuda-max-epk"); break; case GPU_DEC_MAX_EPK_LF : INFO("%s", "GPU got too much work. consider decreasing --cuda-max-epk or --cuda-max-lf"); break; case GPU_INC_K : INFO("%s", "GPU arrays are not fully utilised. consider increasing the --batchsize (-K option)"); break; case GPU_INC_B : INFO("%s", "GPU arrays are not fully utilised. 
consider increasing the --max-bases (-B option)"); break; default : break; } } } else{ core->previous=state; core->previous_count=0; } } void load_balance(core_t *core, db_t *db, double cpu_process_time,double gpu_process_time, int32_t stat_n_gpu_mem_out, int32_t stat_n_too_many_events, int32_t stat_n_ultra_long_reads, float read_array_usage, float event_array_usage){ fprintf(stderr,"[%s] Processing time : CPU %.1f sec, GPU %.1f sec\n",__func__,cpu_process_time,gpu_process_time); double factor = (cpu_process_time-gpu_process_time)/(cpu_process_time+gpu_process_time); if (core->opt.verbosity>1) fprintf(stderr,"[%s] factor %f\n",__func__,factor); float thresh_factor=0.2; float thresh_reads=0.05; float thresh=0.2; if(factor>thresh_factor){ //cpu too much time if (core->opt.verbosity>1) fprintf(stderr,"[%s] CPU too much time\n",__func__); if(stat_n_gpu_mem_out > db->n_bam_rec * thresh_reads){ //gpu run out of memory if (core->opt.verbosity>1) fprintf(stderr,"[%s] Looks like the loaded dataset is too much for the GPU.\n",__func__); //are the GPU arrays balanced? give a warning if(read_array_usage>99 && event_array_usage<100-thresh*100){ //read array full if (core->opt.verbosity>1) fprintf(stderr,"[%s] GPU read array usage too high.\n",__func__); if(stat_n_ultra_long_reads> db->n_bam_rec * thresh_reads){ //array fileld sue to ultra long reads load_balance_advisor(core,GPU_DEC_MAX_LF); if (core->opt.verbosity>1) INFO("%s","GPU read arrays ran out due to ultra long reads. If this message repeats, consider decreasing --cuda-max-lf"); } else{ load_balance_advisor(core,GPU_DEC_AVG_EPK); if (core->opt.verbosity>1) INFO("%s", "GPU read arrays ran out due to too much being allocated for event arrays. If this message repeats, consider decreasing --cuda-avg-epk"); } } else if(event_array_usage>99 && read_array_usage<100-thresh*100){ //event array full if (core->opt.verbosity>1) fprintf(stderr,"[%s] GPU event array usage too high.\n",__func__); if(stat_n_too_many_events > db->n_bam_rec * thresh_reads){//array filled mainly due to reads with too many events load_balance_advisor(core,GPU_DEC_MAX_EPK); if (core->opt.verbosity>1) INFO("%s","GPU event arrays ran out due to over segmented reads. If this message repeats, consider decreasing --cuda-max-epk"); } else{ //array filled AVG_EVENTS_PER_KMER being not enough load_balance_advisor(core,GPU_INC_AVG_EPK); if (core->opt.verbosity>1) INFO("%s","GPU event arrays ran out due to not enough being allocated for events. If this message repeats, consider increasing --cuda-avg-epk"); } } else{//else reduce the batch size load_balance_advisor(core,GPU_DEC_B); if (core->opt.verbosity>1) INFO("%s","GPU arrays ran out. If this message repeats, consider reducing --max-bases (-B option)"); } } else{ //slow CPU? if(stat_n_ultra_long_reads< db->n_bam_rec * thresh_reads && stat_n_too_many_events < db->n_bam_rec * thresh_reads){ load_balance_advisor(core,CPU_INC_MAX_EPK_LF); if (core->opt.verbosity>1) INFO("%s", "CPU got too much work. If this message repeats, consider increasing --cuda-max-epk or --cuda-max-lf"); } else{ if(stat_n_ultra_long_reads< db->n_bam_rec * thresh_reads){ load_balance_advisor(core,CPU_INC_MAX_LF); if (core->opt.verbosity>1) INFO("%s", "CPU got too much work. If this message repeats, consider increasing --cuda-max-lf"); } else if(stat_n_too_many_events < db->n_bam_rec * thresh_reads){ load_balance_advisor(core,CPU_INC_MAX_EPK); if (core->opt.verbosity>1) INFO("%s", "CPU got too much work. 
If this message repeats, consider increasing --cuda-max-epk"); } } } } else if(factor<-thresh_factor){ //gpu too much time load_balance_advisor(core,GPU_DEC_MAX_EPK_LF); if (core->opt.verbosity>1) INFO("%s", "GPU got too much work. If this message repeats, consider decreasing --cuda-max-epk or --cuda-max-lf"); } else{ if(event_array_usage<100-thresh*100 && read_array_usage<100-thresh*100){ if (core->opt.verbosity>1) fprintf(stderr,"[%s] GPU arrays are not fully utilised\n",__func__); if(db->n_bam_rec>=core->opt.batch_size){ load_balance_advisor(core,GPU_INC_K); if (core->opt.verbosity>1) INFO("%s", "GPU arrays are not fully utilised. If this message repeats, consider increasing the --batchsize (-K option)"); } else if(db->sum_bases >= core->opt.batch_size_bases){ load_balance_advisor(core,GPU_INC_B); if (core->opt.verbosity>1) INFO("%s", "GPU arrays are not fully utilised. If this message repeats, consider increasing the --max-bases (-B option)"); } else{ if (core->opt.verbosity>1) fprintf(stderr,"[%s] Probably the last batch\n",__func__); } } else{ if (core->opt.verbosity>1) fprintf(stderr,"[%s] No load balancing required\n",__func__); } } } void align_cuda(core_t* core, db_t* db) { int32_t i,j; int32_t n_bam_rec = db->n_bam_rec; int32_t n_bam_rec_cuda; double realtime1; int32_t n_ultra_long_reads=0; int32_t stat_n_ultra_long_reads=0; //number of ultralong reads processed on CPU int32_t stat_n_too_many_events=0; //number of reads with high avg events per base that are processed on CPU int32_t stat_n_gpu_mem_out=0; //number of reads run on CPU due to the GPU memory running out int32_t sum_bases_cpu=0; //The total sum of bases run on GPU int32_t ultra_long_reads[n_bam_rec]; //not only ultra-long reads, but also ones with large number of average events per base //cpu temp pointers int32_t* read_ptr_host; int32_t* n_events_host; int32_t* event_ptr_host; event_t* event_table_host; AlignedPair* event_align_pairs_host; int32_t* read_len_host; scalings_t* scalings_host; int32_t* n_event_align_pairs_host; char* read_host; /**cuda pointers*/ char* read; //flattened reads sequences int32_t* read_ptr; //index pointer for flattedned "reads" int32_t* read_len; int64_t sum_read_len; int32_t* n_events; event_t* event_table; int32_t* event_ptr; int64_t sum_n_events; scalings_t* scalings; AlignedPair* event_align_pairs; int32_t* n_event_align_pairs; float *bands; uint8_t *trace; EventKmerPair* band_lower_left; model_t* model_kmer_cache; model_t* model; realtime1 = realtime(); read_ptr_host = core->cuda->read_ptr_host; sum_read_len = 0; sum_n_events = 0; //read sequences : needflattening for (i = 0,j=0; i < n_bam_rec; i++) { if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){ read_ptr_host[j] = sum_read_len; sum_read_len += (db->read_len[i] + 1); //with null term sum_n_events += db->et[i].n; j++; } else{ if ((db->et[i].n)/(float)(db->read_len[i]) < AVG_EVENTS_PER_KMER_MAX){ ultra_long_reads[n_ultra_long_reads]=i; n_ultra_long_reads++; sum_bases_cpu += db->read_len[i]; if(db->read_len[i]>=(core->opt.cuda_max_readlen * db->sum_bases/(float)db->n_bam_rec)){ stat_n_ultra_long_reads++; if(core->opt.verbosity>2)STDERR("readlen>=%.0fkbases\t%d",(core->opt.cuda_max_readlen * db->sum_bases/(float)db->n_bam_rec)/1000,db->read_len[i]); } else if ((db->et[i].n)/(float)(db->read_len[i]) >= AVG_EVENTS_PER_KMER_GPU_THRESH){ stat_n_too_many_events++; } else{ stat_n_gpu_mem_out++; } } else{//todo : too many avg events per base, even for the CPU db->n_event_align_pairs[i]=0; } } } 
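/* From here the CPU and GPU work proceed concurrently: align_cudb_async() spawns a thread (or a
 * pool via pthread_cudb) that runs align_single() on the reads collected in ultra_long_reads[],
 * while this thread flattens the GPU batch, runs the three kernels and copies the results back.
 * align_cudb_async_join() near the end of this function waits for the CPU side and returns its
 * processing time for load_balance(). */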
n_bam_rec_cuda = j; //can start processing on the ultra long reads on the CPU pthread_arg_t *tmparg=NULL; pthread_t tid = align_cudb_async(&tmparg,core, db, ultra_long_reads, n_ultra_long_reads); double realtime_process_start=realtime(); read_len_host = core->cuda->read_len_host; scalings_host = core->cuda->scalings_host; n_event_align_pairs_host = core->cuda->n_event_align_pairs_host; //form the temporary flattened array on host read_host = (char*)malloc(sizeof(char) * sum_read_len); MALLOC_CHK(read_host); sum_read_len = 0; sum_n_events = 0; for (i = 0,j=0; i < n_bam_rec; i++) { if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){ int32_t idx = read_ptr_host[j]; strcpy(&read_host[idx], db->read[i]); read_len_host[j]=db->read_len[i]; scalings_host[j]=db->scalings[i]; j++; sum_read_len += (db->read_len[i] + 1); //with null term sum_n_events += db->et[i].n; } } //now the events : need flattening //num events : need flattening //get the total size and create the pointers n_events_host = core->cuda->n_events_host; event_ptr_host = core->cuda->event_ptr_host; sum_read_len = 0; sum_n_events = 0; for (i = 0,j=0; i < n_bam_rec; i++) { if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){ n_events_host[j] = db->et[i].n; event_ptr_host[j] = sum_n_events; sum_n_events += db->et[i].n; j++; sum_read_len += (db->read_len[i] + 1); //with null term } } //event table flatten //form the temporary flattened array on host event_table_host = (event_t*)malloc(sizeof(event_t) * sum_n_events); MALLOC_CHK(event_table_host); sum_read_len = 0; sum_n_events = 0; for (i = 0,j=0; i < n_bam_rec; i++) { if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){ int32_t idx = event_ptr_host[j]; memcpy(&event_table_host[idx], db->et[i].event, sizeof(event_t) * db->et[i].n); j++; sum_read_len += (db->read_len[i] + 1); //with null term sum_n_events += db->et[i].n; } } event_align_pairs_host = (AlignedPair*)malloc(2 * sum_n_events * sizeof(AlignedPair)); MALLOC_CHK(event_align_pairs_host); core->align_cuda_preprocess += (realtime() - realtime1); /** Start GPU mallocs**/ realtime1 = realtime(); read_ptr =core->cuda->read_ptr; read_len=core->cuda->read_len; n_events=core->cuda->n_events; event_ptr=core->cuda->event_ptr; scalings=core->cuda->scalings; model = core->cuda->model; n_event_align_pairs=core->cuda->n_event_align_pairs; #ifndef CUDA_DYNAMIC_MALLOC assert(sum_read_len <= core->cuda->max_sum_read_len); assert(sum_n_events <= floor(core->cuda->max_sum_read_len * AVG_EVENTS_PER_KMER)); //fprintf(stderr,"%d %d\n", sum_read_len,sum_n_events); if(core->opt.verbosity>1) STDERR("%.2f %% of GPU read arrays and %.2f %% of GPU event arrays were utilised", sum_read_len/(float)(core->cuda->max_sum_read_len)*100 , sum_n_events/(float)floor((core->cuda->max_sum_read_len)*AVG_EVENTS_PER_KMER)*100); read=(core->cuda->read); event_table=(core->cuda->event_table); model_kmer_cache=(core->cuda->model_kmer_cache); event_align_pairs=(core->cuda->event_align_pairs); bands=(core->cuda->bands); trace=(core->cuda->trace); band_lower_left=(core->cuda->band_lower_left); cudaMemset(trace,0,sizeof(uint8_t) * (sum_n_events + sum_read_len) * ALN_BANDWIDTH); //initialise the trace array to 0 CUDA_CHK(); #else if(core->opt.verbosity>1) print_size("read array",sum_read_len * sizeof(char)); cudaMalloc((void**)&read, sum_read_len * sizeof(char)); //with null char CUDA_CHK(); if(core->opt.verbosity>1) print_size("event table",sum_n_events * sizeof(event_t)); 
cudaMalloc((void**)&event_table, sum_n_events * sizeof(event_t)); CUDA_CHK(); if(core->opt.verbosity>1) print_size("model kmer cache",sum_read_len * sizeof(model_t)); cudaMalloc((void**)&model_kmer_cache, sum_read_len * sizeof(model_t)); CUDA_CHK(); /**allocate output arrays for cuda**/ if(core->opt.verbosity>1) print_size("event align pairs",2 * sum_n_events *sizeof(AlignedPair)); cudaMalloc((void**)&event_align_pairs, 2 * sum_n_events * sizeof(AlignedPair)); //todo : need better huristic CUDA_CHK(); //scratch arrays size_t sum_n_bands = sum_n_events + sum_read_len; //todo : can be optimised if(core->opt.verbosity>1) print_size("bands",sizeof(float) * sum_n_bands * ALN_BANDWIDTH); cudaMalloc((void**)&bands,sizeof(float) * sum_n_bands * ALN_BANDWIDTH); CUDA_CHK(); if(core->opt.verbosity>1) print_size("trace",sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH); cudaMalloc((void**)&trace, sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH); CUDA_CHK(); cudaMemset(trace,0,sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH); //initialise the trace array to 0 CUDA_CHK(); if(core->opt.verbosity>1) print_size("band_lower_left",sizeof(EventKmerPair)* sum_n_bands); cudaMalloc((void**)&band_lower_left, sizeof(EventKmerPair)* sum_n_bands); CUDA_CHK(); #endif core->align_cuda_malloc += (realtime() - realtime1); /* cuda mem copys*/ realtime1 =realtime(); cudaMemcpy(read_ptr, read_ptr_host, n_bam_rec_cuda * sizeof(int32_t), cudaMemcpyHostToDevice); CUDA_CHK(); cudaMemcpy(read, read_host, sum_read_len * sizeof(char), cudaMemcpyHostToDevice); CUDA_CHK(); //read length : already linear hence direct copy cudaMemcpy(read_len, read_len_host, n_bam_rec_cuda * sizeof(int32_t), cudaMemcpyHostToDevice); CUDA_CHK(); cudaMemcpy(n_events, n_events_host, n_bam_rec_cuda * sizeof(int32_t), cudaMemcpyHostToDevice); CUDA_CHK(); cudaMemcpy(event_ptr, event_ptr_host, n_bam_rec_cuda * sizeof(int32_t), cudaMemcpyHostToDevice); CUDA_CHK(); cudaMemcpy(event_table, event_table_host, sizeof(event_t) * sum_n_events, cudaMemcpyHostToDevice); CUDA_CHK(); //can be interleaved cudaMemcpy(scalings, scalings_host, sizeof(scalings_t) * n_bam_rec_cuda, cudaMemcpyHostToDevice); CUDA_CHK(); core->align_cuda_memcpy += (realtime() - realtime1); realtime1 = realtime(); /*pre kernel*/ assert(BLOCK_LEN_BANDWIDTH>=ALN_BANDWIDTH); dim3 gridpre(1,(n_bam_rec_cuda + BLOCK_LEN_READS - 1) / BLOCK_LEN_READS); dim3 blockpre(BLOCK_LEN_BANDWIDTH,BLOCK_LEN_READS); if(core->opt.verbosity>1) STDERR("grid %d,%d, block %d,%d",gridpre.x,gridpre.y, blockpre.x,blockpre.y); align_kernel_pre_2d<<<gridpre, blockpre>>>( read, read_len, read_ptr, n_events, event_ptr, model, n_bam_rec_cuda, model_kmer_cache,bands,trace,band_lower_left); cudaDeviceSynchronize();CUDA_CHK(); if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3fsec] align-pre kernel done\n", __func__, realtime() - realtime1); core->align_kernel_time += (realtime() - realtime1); core->align_pre_kernel_time += (realtime() - realtime1); realtime1 = realtime(); /* core kernel*/ assert(BLOCK_LEN_BANDWIDTH>=ALN_BANDWIDTH); dim3 grid1(1,(n_bam_rec_cuda + BLOCK_LEN_READS - 1) / BLOCK_LEN_READS); dim3 block1(BLOCK_LEN_BANDWIDTH,BLOCK_LEN_READS); align_kernel_core_2d_shm<<<grid1, block1>>>(read_len, read_ptr, event_table, n_events, event_ptr, scalings, n_bam_rec_cuda, model_kmer_cache,bands,trace,band_lower_left ); cudaDeviceSynchronize();CUDA_CHK(); if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3fsec] align-core kernel done\n", __func__, realtime() - realtime1); core->align_kernel_time += (realtime() - realtime1); 
core->align_core_kernel_time += (realtime() - realtime1); realtime1 = realtime(); /*post kernel*/ int32_t BLOCK_LEN = core->opt.cuda_block_size; dim3 gridpost((n_bam_rec_cuda + BLOCK_LEN - 1) / BLOCK_LEN); dim3 blockpost(BLOCK_LEN); #ifndef WARP_HACK align_kernel_post<<<gridpost, blockpost>>>(event_align_pairs, n_event_align_pairs, read_len, read_ptr, event_table, n_events, event_ptr,scalings, n_bam_rec_cuda, model_kmer_cache,bands,trace,band_lower_left ); #else assert(BLOCK_LEN>=32); dim3 grid1post((n_bam_rec_cuda + (BLOCK_LEN/32) - 1) / (BLOCK_LEN/32)); if(core->opt.verbosity>1) STDERR("grid new %d",grid1post.x); align_kernel_post<<<grid1post, blockpost>>>(event_align_pairs, n_event_align_pairs, read_len, read_ptr, event_table, n_events, event_ptr, scalings, n_bam_rec_cuda, model_kmer_cache,bands,trace,band_lower_left ); #endif cudaDeviceSynchronize();CUDA_CHK(); if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3fsec] align-post kernel done\n", __func__, realtime() - realtime1); core->align_kernel_time += (realtime() - realtime1); core->align_post_kernel_time += (realtime() - realtime1); //fprintf(stderr,"readlen %d,n_events %d\n",db->read_len[i],n_event_align_pairs); #ifdef CUDA_DEBUG cudaDeviceSynchronize(); CUDA_CHK(); #endif /** copyback ans**/ realtime1 = realtime(); cudaMemcpy(n_event_align_pairs_host, n_event_align_pairs, n_bam_rec_cuda * sizeof(int32_t), cudaMemcpyDeviceToHost); CUDA_CHK(); cudaMemcpy(event_align_pairs_host, event_align_pairs, 2 * sum_n_events * sizeof(AlignedPair), cudaMemcpyDeviceToHost); CUDA_CHK(); core->align_cuda_memcpy += (realtime() - realtime1); realtime1 = realtime(); #ifdef CUDA_DYNAMIC_MALLOC cudaFree(read); //with null char cudaFree(event_table); cudaFree(event_align_pairs); cudaFree(bands); cudaFree(trace); cudaFree(band_lower_left); cudaFree(model_kmer_cache); #endif core->align_cuda_malloc += (realtime() - realtime1); /** post work**/ realtime1 = realtime(); //copy back sum_read_len = 0; sum_n_events = 0; for (i = 0,j=0; i < n_bam_rec; i++) { if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){ int32_t idx = event_ptr_host[j]; db->n_event_align_pairs[i]=n_event_align_pairs_host[j]; #ifdef REVERSAL_ON_CPU int c; int end = db->n_event_align_pairs[i] - 1; AlignedPair* out_2= db->event_align_pairs[i]; AlignedPair* in_2= &event_align_pairs_host[idx * 2]; for (c = 0; c < db->n_event_align_pairs[i] ; c++) { out_2[c].ref_pos = in_2[end].ref_pos; out_2[c].read_pos = in_2[end].read_pos; end--; } #else memcpy(db->event_align_pairs[i], &event_align_pairs_host[idx * 2], sizeof(AlignedPair) * db->n_event_align_pairs[i]); #endif j++; sum_read_len += (db->read_len[i] + 1); //with null term sum_n_events += db->et[i].n; } } //free the temp arrays on host free(read_host); free(event_table_host); free(event_align_pairs_host); core->align_cuda_postprocess += (realtime() - realtime1); double gpu_process_time = realtime()-realtime_process_start; realtime1 = realtime(); double cpu_process_time = align_cudb_async_join(tmparg,tid); core->extra_load_cpu += (realtime() - realtime1); if(core->opt.verbosity>1) { fprintf(stderr, "[%s::%.3fsec] CPU extra processing done (>=%.0fkbases:%d|>=%.1fevents:%d|gpu_mem_out:%d)\n", __func__,realtime() - realtime1,((core->opt.cuda_max_readlen * db->sum_bases/(float)db->n_bam_rec))/1000, stat_n_ultra_long_reads, AVG_EVENTS_PER_KMER_GPU_THRESH,stat_n_too_many_events, stat_n_gpu_mem_out); } STDERR("Load : CPU %d entries (%.1fM bases), GPU %d entries (%.1fM bases)", n_bam_rec-n_bam_rec_cuda, 
(float)sum_bases_cpu/(1000*1000),n_bam_rec_cuda, (float)sum_read_len/(1000*1000)); load_balance(core,db,cpu_process_time,gpu_process_time,stat_n_gpu_mem_out,stat_n_too_many_events, stat_n_ultra_long_reads, sum_read_len/(float)(core->cuda->max_sum_read_len)*100 , sum_n_events/(float)floor((core->cuda->max_sum_read_len)*AVG_EVENTS_PER_KMER)*100); } #endif
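The align_cuda code above packs the variable-length read strings and event tables into single contiguous device buffers, with read_ptr/event_ptr holding per-read start offsets computed as a running sum (sum_read_len, sum_n_events). A minimal sketch of that offset-packing idea, using hypothetical names (pack_offsets, flat_reads) rather than the actual f5c structures:

// Illustrative only: compute per-item offsets into one flat buffer, the same
// role read_ptr_host/event_ptr_host play above. Not part of f5c itself.
#include <stdint.h>

// lengths[i] is the size of item i; offsets[i] receives its start index in the
// flat buffer; the return value is the total flattened size to allocate.
static int64_t pack_offsets(const int32_t *lengths, int32_t n, int32_t *offsets) {
    int64_t total = 0;
    for (int32_t i = 0; i < n; i++) {
        offsets[i] = (int32_t)total;   // running prefix sum, like read_ptr_host[j]
        total += lengths[i];           // f5c adds read_len+1 to keep the null terminator
    }
    return total;                      // analogous to sum_read_len / sum_n_events
}

// A kernel then finds its slice with the same offsets, e.g.
//   const char *my_read = flat_reads + read_ptr[i];   // read_len[i] characters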
gauge_stout.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <quda_internal.h> #include <tune_quda.h> #include <gauge_field.h> #include <jitify_helper.cuh> #include <kernels/gauge_stout.cuh> #include <instantiate.h> namespace quda { template <typename Float, int nColor, QudaReconstructType recon> class GaugeSTOUT : TunableVectorYZ { static constexpr int stoutDim = 3; // apply stouting in space only GaugeSTOUTArg<Float, nColor, recon, stoutDim> arg; const GaugeField &meta; bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: // (2,3): 2 for parity in the y thread dim, 3 corresponds to mapping direction to the z thread dim GaugeSTOUT(GaugeField &out, const GaugeField &in, double rho) : TunableVectorYZ(2, stoutDim), arg(out, in, rho), meta(in) { strcpy(aux, meta.AuxString()); strcat(aux, comm_dim_partitioned_string()); #ifdef JITIFY create_jitify_program("kernels/gauge_stout.cuh"); #endif apply(0); qudaDeviceSynchronize(); } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef JITIFY using namespace jitify::reflection; jitify_error = program->kernel("quda::computeSTOUTStep").instantiate(Type<Arg>()) .configure(tp.grid, tp.block, tp.shared_bytes, stream).launch(arg); #else hipLaunchKernelGGL(( computeSTOUTStep), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, 0, arg); #endif } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } void preTune() { arg.out.save(); } // defensive measure in case they alias void postTune() { arg.out.load(); } long long flops() const { return 3 * (2 + 2 * 4) * 198ll * arg.threads; } // just counts matrix multiplication long long bytes() const { return 3 * ((1 + 2 * 6) * arg.in.Bytes() + arg.out.Bytes()) * arg.threads; } }; // GaugeSTOUT void STOUTStep(GaugeField &out, const GaugeField &in, double rho) { #ifdef GPU_GAUGE_TOOLS checkPrecision(out, in); checkReconstruct(out, in); if (!out.isNative()) errorQuda("Order %d with %d reconstruct not supported", in.Order(), in.Reconstruct()); if (!in.isNative()) errorQuda("Order %d with %d reconstruct not supported", out.Order(), out.Reconstruct()); instantiate<GaugeSTOUT>(out, in, rho); #else errorQuda("Gauge tools are not built"); #endif } template <typename Float, int nColor, QudaReconstructType recon> class GaugeOvrImpSTOUT : TunableVectorYZ { static constexpr int stoutDim = 4; // apply stouting in space only GaugeSTOUTArg<Float, nColor, recon, stoutDim> arg; const GaugeField &meta; bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.threads; } public: GaugeOvrImpSTOUT(GaugeField &out, const GaugeField &in, double rho, double epsilon) : TunableVectorYZ(2, stoutDim), arg(out, in, rho, epsilon), meta(in) { strcpy(aux, meta.AuxString()); strcat(aux, comm_dim_partitioned_string()); #ifdef JITIFY create_jitify_program("kernels/gauge_stout.cuh"); #endif apply(0); qudaDeviceSynchronize(); } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef JITIFY using namespace jitify::reflection; jitify_error = program->kernel("quda::computeOvrImpSTOUTStep").instantiate(Type<Arg>()) .configure(tp.grid, tp.block, tp.shared_bytes, stream).launch(arg); #else hipLaunchKernelGGL(( computeOvrImpSTOUTStep), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, 0, arg); #endif } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } void preTune() { arg.out.save(); } // defensive measure in case they alias void postTune() { arg.out.load(); } long long flops() const { return 4*(18+2+2*4)*198ll*arg.threads; } // just counts matrix multiplication long long bytes() const { return 4*((1+2*12)*arg.in.Bytes()+arg.out.Bytes())*arg.threads; } }; // GaugeOvrImpSTOUT void OvrImpSTOUTStep(GaugeField &out, const GaugeField& in, double rho, double epsilon) { #ifdef GPU_GAUGE_TOOLS checkPrecision(out, in); checkReconstruct(out, in); if (!out.isNative()) errorQuda("Order %d with %d reconstruct not supported", in.Order(), in.Reconstruct()); if (!in.isNative()) errorQuda("Order %d with %d reconstruct not supported", out.Order(), out.Reconstruct()); instantiate<GaugeOvrImpSTOUT>(out, in, rho, epsilon); #else errorQuda("Gauge tools are not built"); #endif } }
gauge_stout.cu
#include <quda_internal.h> #include <tune_quda.h> #include <gauge_field.h> #include <jitify_helper.cuh> #include <kernels/gauge_stout.cuh> #include <instantiate.h> namespace quda { template <typename Float, int nColor, QudaReconstructType recon> class GaugeSTOUT : TunableVectorYZ { static constexpr int stoutDim = 3; // apply stouting in space only GaugeSTOUTArg<Float, nColor, recon, stoutDim> arg; const GaugeField &meta; bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: // (2,3): 2 for parity in the y thread dim, 3 corresponds to mapping direction to the z thread dim GaugeSTOUT(GaugeField &out, const GaugeField &in, double rho) : TunableVectorYZ(2, stoutDim), arg(out, in, rho), meta(in) { strcpy(aux, meta.AuxString()); strcat(aux, comm_dim_partitioned_string()); #ifdef JITIFY create_jitify_program("kernels/gauge_stout.cuh"); #endif apply(0); qudaDeviceSynchronize(); } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef JITIFY using namespace jitify::reflection; jitify_error = program->kernel("quda::computeSTOUTStep").instantiate(Type<Arg>()) .configure(tp.grid, tp.block, tp.shared_bytes, stream).launch(arg); #else computeSTOUTStep<<<tp.grid, tp.block, tp.shared_bytes>>>(arg); #endif } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } void preTune() { arg.out.save(); } // defensive measure in case they alias void postTune() { arg.out.load(); } long long flops() const { return 3 * (2 + 2 * 4) * 198ll * arg.threads; } // just counts matrix multiplication long long bytes() const { return 3 * ((1 + 2 * 6) * arg.in.Bytes() + arg.out.Bytes()) * arg.threads; } }; // GaugeSTOUT void STOUTStep(GaugeField &out, const GaugeField &in, double rho) { #ifdef GPU_GAUGE_TOOLS checkPrecision(out, in); checkReconstruct(out, in); if (!out.isNative()) errorQuda("Order %d with %d reconstruct not supported", in.Order(), in.Reconstruct()); if (!in.isNative()) errorQuda("Order %d with %d reconstruct not supported", out.Order(), out.Reconstruct()); instantiate<GaugeSTOUT>(out, in, rho); #else errorQuda("Gauge tools are not built"); #endif } template <typename Float, int nColor, QudaReconstructType recon> class GaugeOvrImpSTOUT : TunableVectorYZ { static constexpr int stoutDim = 4; // apply stouting in space only GaugeSTOUTArg<Float, nColor, recon, stoutDim> arg; const GaugeField &meta; bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.threads; } public: GaugeOvrImpSTOUT(GaugeField &out, const GaugeField &in, double rho, double epsilon) : TunableVectorYZ(2, stoutDim), arg(out, in, rho, epsilon), meta(in) { strcpy(aux, meta.AuxString()); strcat(aux, comm_dim_partitioned_string()); #ifdef JITIFY create_jitify_program("kernels/gauge_stout.cuh"); #endif apply(0); qudaDeviceSynchronize(); } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef JITIFY using namespace jitify::reflection; jitify_error = program->kernel("quda::computeOvrImpSTOUTStep").instantiate(Type<Arg>()) .configure(tp.grid, tp.block, tp.shared_bytes, stream).launch(arg); #else computeOvrImpSTOUTStep<<<tp.grid, tp.block, tp.shared_bytes>>>(arg); #endif } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } void preTune() { arg.out.save(); } // defensive measure in case they alias void postTune() { arg.out.load(); } long long flops() const { return 4*(18+2+2*4)*198ll*arg.threads; } // just counts matrix multiplication long long bytes() const { return 4*((1+2*12)*arg.in.Bytes()+arg.out.Bytes())*arg.threads; } }; // GaugeOvrImpSTOUT void OvrImpSTOUTStep(GaugeField &out, const GaugeField& in, double rho, double epsilon) { #ifdef GPU_GAUGE_TOOLS checkPrecision(out, in); checkReconstruct(out, in); if (!out.isNative()) errorQuda("Order %d with %d reconstruct not supported", in.Order(), in.Reconstruct()); if (!in.isNative()) errorQuda("Order %d with %d reconstruct not supported", out.Order(), out.Reconstruct()); instantiate<GaugeOvrImpSTOUT>(out, in, rho, epsilon); #else errorQuda("Gauge tools are not built"); #endif } }
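The gauge_stout.hip file above differs from this gauge_stout.cu only in the hipify banner, the hip_runtime include, cudaStream_t becoming hipStream_t, and each triple-chevron launch being rewritten as hipLaunchKernelGGL. A small standalone illustration of that launch translation (toy kernel, not QUDA code):

#include <hip/hip_runtime.h>

__global__ void scale(float *x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launch_scale(float *d_x, float a, int n, hipStream_t stream)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA source form:   scale<<<grid, block, 0, stream>>>(d_x, a, n);
    // hipify rewrites it to the macro form seen throughout the .hip files:
    hipLaunchKernelGGL(scale, grid, block, 0 /*shared mem bytes*/, stream, d_x, a, n);
}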
ccc316597d7a6f50ef8c7340a68058869d92a5cd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define STORE_DERIVATIVE_1(INDEX) atomicAdd(&derivBuffers[offset+(INDEX-1)*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (deriv##INDEX##_1*0x100000000))); #define STORE_DERIVATIVE_2(INDEX) atomicAdd(&derivBuffers[offset+(INDEX-1)*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].deriv##INDEX*0x100000000))); typedef struct { real3 pos; real3 force; ATOM_PARAMETER_DATA #ifdef NEED_PADDING float padding; #endif } AtomData; /** * Compute a force based on pair interactions. */ extern "C" __global__ void computeN2Energy(unsigned long long* __restrict__ forceBuffers, real* __restrict__ energyBuffer, const real4* __restrict__ posq, const unsigned int* __restrict__ exclusions, const ushort2* __restrict__ exclusionTiles, #ifdef USE_CUTOFF const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize, unsigned int maxTiles, const real4* __restrict__ blockCenter, const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms #else unsigned int numTiles #endif PARAMETER_ARGUMENTS) { const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE; const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); const unsigned int tbx = threadIdx.x - tgx; real energy = 0; __shared__ AtomData localData[THREAD_BLOCK_SIZE]; // First loop: process tiles that contain exclusions. const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps; const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps; for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) { const ushort2 tileIndices = exclusionTiles[pos]; const unsigned int x = tileIndices.x; const unsigned int y = tileIndices.y; real3 force = make_real3(0); DECLARE_ATOM1_DERIVATIVES unsigned int atom1 = x*TILE_SIZE + tgx; real4 pos1 = posq[atom1]; LOAD_ATOM1_PARAMETERS #ifdef USE_EXCLUSIONS unsigned int excl = exclusions[pos*TILE_SIZE+tgx]; #endif if (x == y) { // This tile is on the diagonal. const unsigned int localAtomIndex = threadIdx.x; localData[localAtomIndex].pos = make_real3(pos1.x, pos1.y, pos1.z); LOAD_LOCAL_PARAMETERS_FROM_1 for (unsigned int j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+j; real3 pos2 = localData[atom2].pos; real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z); #ifdef USE_PERIODIC delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = y*TILE_SIZE+j; real dEdR = 0; real tempEnergy = 0; #ifdef USE_EXCLUSIONS bool isExcluded = !(excl & 0x1); #endif if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS && atom1 != atom2) { COMPUTE_INTERACTION dEdR /= -r; } energy += 0.5f*tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; #ifdef USE_CUTOFF } #endif #ifdef USE_EXCLUSIONS excl >>= 1; #endif } } else { // This is an off-diagonal tile. 
const unsigned int localAtomIndex = threadIdx.x; unsigned int j = y*TILE_SIZE + tgx; real4 tempPosq = posq[j]; localData[localAtomIndex].pos = make_real3(tempPosq.x, tempPosq.y, tempPosq.z); LOAD_LOCAL_PARAMETERS_FROM_GLOBAL localData[localAtomIndex].force = make_real3(0); CLEAR_LOCAL_DERIVATIVES #ifdef USE_EXCLUSIONS excl = (excl >> tgx) | (excl << (TILE_SIZE - tgx)); #endif unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real3 pos2 = localData[atom2].pos; real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z); #ifdef USE_PERIODIC delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = y*TILE_SIZE+tj; real dEdR = 0; real tempEnergy = 0; #ifdef USE_EXCLUSIONS bool isExcluded = !(excl & 0x1); #endif if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) { COMPUTE_INTERACTION dEdR /= -r; } energy += tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; atom2 = tbx+tj; localData[atom2].force.x += delta.x; localData[atom2].force.y += delta.y; localData[atom2].force.z += delta.z; RECORD_DERIVATIVE_2 #ifdef USE_CUTOFF } #endif #ifdef USE_EXCLUSIONS excl >>= 1; #endif tj = (tj + 1) & (TILE_SIZE - 1); } } // Write results. unsigned int offset = x*TILE_SIZE + tgx; atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (force.x*0x100000000))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000))); STORE_DERIVATIVES_1 if (x != y) { offset = y*TILE_SIZE + tgx; atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000))); STORE_DERIVATIVES_2 } } // Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all // of them (no cutoff). #ifdef USE_CUTOFF unsigned int numTiles = interactionCount[0]; int pos = (int) (warp*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps); int end = (int) ((warp+1)*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps); #else int pos = (int) (warp*(long long)numTiles/totalWarps); int end = (int) ((warp+1)*(long long)numTiles/totalWarps); #endif int skipBase = 0; int currentSkipIndex = tbx; __shared__ int atomIndices[THREAD_BLOCK_SIZE]; __shared__ volatile int skipTiles[THREAD_BLOCK_SIZE]; skipTiles[threadIdx.x] = -1; while (pos < end) { const bool isExcluded = false; real3 force = make_real3(0); DECLARE_ATOM1_DERIVATIVES bool includeTile = true; // Extract the coordinates of this tile. 
int x, y; bool singlePeriodicCopy = false; #ifdef USE_CUTOFF if (numTiles <= maxTiles) { x = tiles[pos]; real4 blockSizeX = blockSize[x]; singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= CUTOFF && 0.5f*periodicBoxSize.y-blockSizeX.y >= CUTOFF && 0.5f*periodicBoxSize.z-blockSizeX.z >= CUTOFF); } else #endif { y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos)); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error. y += (x < y ? -1 : 1); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); } // Skip over tiles that have exclusions, since they were already processed. while (skipTiles[tbx+TILE_SIZE-1] < pos) { if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) { ushort2 tile = exclusionTiles[skipBase+tgx]; skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2; } else skipTiles[threadIdx.x] = end; skipBase += TILE_SIZE; currentSkipIndex = tbx; } while (skipTiles[currentSkipIndex] < pos) currentSkipIndex++; includeTile = (skipTiles[currentSkipIndex] != pos); } if (includeTile) { unsigned int atom1 = x*TILE_SIZE + tgx; // Load atom data for this tile. real4 pos1 = posq[atom1]; LOAD_ATOM1_PARAMETERS const unsigned int localAtomIndex = threadIdx.x; #ifdef USE_CUTOFF unsigned int j = (numTiles <= maxTiles ? interactingAtoms[pos*TILE_SIZE+tgx] : y*TILE_SIZE + tgx); #else unsigned int j = y*TILE_SIZE + tgx; #endif atomIndices[threadIdx.x] = j; if (j < PADDED_NUM_ATOMS) { real4 tempPosq = posq[j]; localData[localAtomIndex].pos = make_real3(tempPosq.x, tempPosq.y, tempPosq.z); LOAD_LOCAL_PARAMETERS_FROM_GLOBAL localData[localAtomIndex].force = make_real3(0); CLEAR_LOCAL_DERIVATIVES } #ifdef USE_PERIODIC if (singlePeriodicCopy) { // The box is small enough that we can just translate all the atoms into a single periodic // box, then skip having to apply periodic boundary conditions later. real4 blockCenterX = blockCenter[x]; pos1.x -= floor((pos1.x-blockCenterX.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; pos1.y -= floor((pos1.y-blockCenterX.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; pos1.z -= floor((pos1.z-blockCenterX.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; localData[threadIdx.x].pos.x -= floor((localData[threadIdx.x].pos.x-blockCenterX.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; localData[threadIdx.x].pos.y -= floor((localData[threadIdx.x].pos.y-blockCenterX.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; localData[threadIdx.x].pos.z -= floor((localData[threadIdx.x].pos.z-blockCenterX.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real3 pos2 = localData[atom2].pos; real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z); real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = atomIndices[tbx+tj]; real dEdR = 0; real tempEnergy = 0; if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) { COMPUTE_INTERACTION dEdR /= -r; } energy += tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; atom2 = tbx+tj; localData[atom2].force.x += delta.x; localData[atom2].force.y += delta.y; localData[atom2].force.z += delta.z; RECORD_DERIVATIVE_2 #ifdef USE_CUTOFF } #endif tj = (tj + 1) & (TILE_SIZE - 1); } } else #endif { // We need to apply periodic boundary conditions separately for each interaction. 
unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real3 pos2 = localData[atom2].pos; real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z); #ifdef USE_PERIODIC delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = atomIndices[tbx+tj]; real dEdR = 0; real tempEnergy = 0; if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) { COMPUTE_INTERACTION dEdR /= -r; } energy += tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; atom2 = tbx+tj; localData[atom2].force.x += delta.x; localData[atom2].force.y += delta.y; localData[atom2].force.z += delta.z; RECORD_DERIVATIVE_2 #ifdef USE_CUTOFF } #endif tj = (tj + 1) & (TILE_SIZE - 1); } } // Write results. atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (force.x*0x100000000))); atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000))); atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000))); unsigned int offset = atom1; STORE_DERIVATIVES_1 #ifdef USE_CUTOFF unsigned int atom2 = atomIndices[threadIdx.x]; #else unsigned int atom2 = y*TILE_SIZE + tgx; #endif if (atom2 < PADDED_NUM_ATOMS) { atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000))); atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000))); atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000))); offset = atom2; STORE_DERIVATIVES_2 } } pos++; } energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy; }
ccc316597d7a6f50ef8c7340a68058869d92a5cd.cu
#define STORE_DERIVATIVE_1(INDEX) atomicAdd(&derivBuffers[offset+(INDEX-1)*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (deriv##INDEX##_1*0x100000000))); #define STORE_DERIVATIVE_2(INDEX) atomicAdd(&derivBuffers[offset+(INDEX-1)*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].deriv##INDEX*0x100000000))); typedef struct { real3 pos; real3 force; ATOM_PARAMETER_DATA #ifdef NEED_PADDING float padding; #endif } AtomData; /** * Compute a force based on pair interactions. */ extern "C" __global__ void computeN2Energy(unsigned long long* __restrict__ forceBuffers, real* __restrict__ energyBuffer, const real4* __restrict__ posq, const unsigned int* __restrict__ exclusions, const ushort2* __restrict__ exclusionTiles, #ifdef USE_CUTOFF const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize, unsigned int maxTiles, const real4* __restrict__ blockCenter, const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms #else unsigned int numTiles #endif PARAMETER_ARGUMENTS) { const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE; const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); const unsigned int tbx = threadIdx.x - tgx; real energy = 0; __shared__ AtomData localData[THREAD_BLOCK_SIZE]; // First loop: process tiles that contain exclusions. const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps; const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps; for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) { const ushort2 tileIndices = exclusionTiles[pos]; const unsigned int x = tileIndices.x; const unsigned int y = tileIndices.y; real3 force = make_real3(0); DECLARE_ATOM1_DERIVATIVES unsigned int atom1 = x*TILE_SIZE + tgx; real4 pos1 = posq[atom1]; LOAD_ATOM1_PARAMETERS #ifdef USE_EXCLUSIONS unsigned int excl = exclusions[pos*TILE_SIZE+tgx]; #endif if (x == y) { // This tile is on the diagonal. const unsigned int localAtomIndex = threadIdx.x; localData[localAtomIndex].pos = make_real3(pos1.x, pos1.y, pos1.z); LOAD_LOCAL_PARAMETERS_FROM_1 for (unsigned int j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+j; real3 pos2 = localData[atom2].pos; real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z); #ifdef USE_PERIODIC delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = y*TILE_SIZE+j; real dEdR = 0; real tempEnergy = 0; #ifdef USE_EXCLUSIONS bool isExcluded = !(excl & 0x1); #endif if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS && atom1 != atom2) { COMPUTE_INTERACTION dEdR /= -r; } energy += 0.5f*tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; #ifdef USE_CUTOFF } #endif #ifdef USE_EXCLUSIONS excl >>= 1; #endif } } else { // This is an off-diagonal tile. 
const unsigned int localAtomIndex = threadIdx.x; unsigned int j = y*TILE_SIZE + tgx; real4 tempPosq = posq[j]; localData[localAtomIndex].pos = make_real3(tempPosq.x, tempPosq.y, tempPosq.z); LOAD_LOCAL_PARAMETERS_FROM_GLOBAL localData[localAtomIndex].force = make_real3(0); CLEAR_LOCAL_DERIVATIVES #ifdef USE_EXCLUSIONS excl = (excl >> tgx) | (excl << (TILE_SIZE - tgx)); #endif unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real3 pos2 = localData[atom2].pos; real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z); #ifdef USE_PERIODIC delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = y*TILE_SIZE+tj; real dEdR = 0; real tempEnergy = 0; #ifdef USE_EXCLUSIONS bool isExcluded = !(excl & 0x1); #endif if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) { COMPUTE_INTERACTION dEdR /= -r; } energy += tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; atom2 = tbx+tj; localData[atom2].force.x += delta.x; localData[atom2].force.y += delta.y; localData[atom2].force.z += delta.z; RECORD_DERIVATIVE_2 #ifdef USE_CUTOFF } #endif #ifdef USE_EXCLUSIONS excl >>= 1; #endif tj = (tj + 1) & (TILE_SIZE - 1); } } // Write results. unsigned int offset = x*TILE_SIZE + tgx; atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (force.x*0x100000000))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000))); STORE_DERIVATIVES_1 if (x != y) { offset = y*TILE_SIZE + tgx; atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000))); STORE_DERIVATIVES_2 } } // Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all // of them (no cutoff). #ifdef USE_CUTOFF unsigned int numTiles = interactionCount[0]; int pos = (int) (warp*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps); int end = (int) ((warp+1)*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps); #else int pos = (int) (warp*(long long)numTiles/totalWarps); int end = (int) ((warp+1)*(long long)numTiles/totalWarps); #endif int skipBase = 0; int currentSkipIndex = tbx; __shared__ int atomIndices[THREAD_BLOCK_SIZE]; __shared__ volatile int skipTiles[THREAD_BLOCK_SIZE]; skipTiles[threadIdx.x] = -1; while (pos < end) { const bool isExcluded = false; real3 force = make_real3(0); DECLARE_ATOM1_DERIVATIVES bool includeTile = true; // Extract the coordinates of this tile. 
int x, y; bool singlePeriodicCopy = false; #ifdef USE_CUTOFF if (numTiles <= maxTiles) { x = tiles[pos]; real4 blockSizeX = blockSize[x]; singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= CUTOFF && 0.5f*periodicBoxSize.y-blockSizeX.y >= CUTOFF && 0.5f*periodicBoxSize.z-blockSizeX.z >= CUTOFF); } else #endif { y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos)); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error. y += (x < y ? -1 : 1); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); } // Skip over tiles that have exclusions, since they were already processed. while (skipTiles[tbx+TILE_SIZE-1] < pos) { if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) { ushort2 tile = exclusionTiles[skipBase+tgx]; skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2; } else skipTiles[threadIdx.x] = end; skipBase += TILE_SIZE; currentSkipIndex = tbx; } while (skipTiles[currentSkipIndex] < pos) currentSkipIndex++; includeTile = (skipTiles[currentSkipIndex] != pos); } if (includeTile) { unsigned int atom1 = x*TILE_SIZE + tgx; // Load atom data for this tile. real4 pos1 = posq[atom1]; LOAD_ATOM1_PARAMETERS const unsigned int localAtomIndex = threadIdx.x; #ifdef USE_CUTOFF unsigned int j = (numTiles <= maxTiles ? interactingAtoms[pos*TILE_SIZE+tgx] : y*TILE_SIZE + tgx); #else unsigned int j = y*TILE_SIZE + tgx; #endif atomIndices[threadIdx.x] = j; if (j < PADDED_NUM_ATOMS) { real4 tempPosq = posq[j]; localData[localAtomIndex].pos = make_real3(tempPosq.x, tempPosq.y, tempPosq.z); LOAD_LOCAL_PARAMETERS_FROM_GLOBAL localData[localAtomIndex].force = make_real3(0); CLEAR_LOCAL_DERIVATIVES } #ifdef USE_PERIODIC if (singlePeriodicCopy) { // The box is small enough that we can just translate all the atoms into a single periodic // box, then skip having to apply periodic boundary conditions later. real4 blockCenterX = blockCenter[x]; pos1.x -= floor((pos1.x-blockCenterX.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; pos1.y -= floor((pos1.y-blockCenterX.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; pos1.z -= floor((pos1.z-blockCenterX.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; localData[threadIdx.x].pos.x -= floor((localData[threadIdx.x].pos.x-blockCenterX.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; localData[threadIdx.x].pos.y -= floor((localData[threadIdx.x].pos.y-blockCenterX.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; localData[threadIdx.x].pos.z -= floor((localData[threadIdx.x].pos.z-blockCenterX.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real3 pos2 = localData[atom2].pos; real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z); real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = atomIndices[tbx+tj]; real dEdR = 0; real tempEnergy = 0; if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) { COMPUTE_INTERACTION dEdR /= -r; } energy += tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; atom2 = tbx+tj; localData[atom2].force.x += delta.x; localData[atom2].force.y += delta.y; localData[atom2].force.z += delta.z; RECORD_DERIVATIVE_2 #ifdef USE_CUTOFF } #endif tj = (tj + 1) & (TILE_SIZE - 1); } } else #endif { // We need to apply periodic boundary conditions separately for each interaction. 
unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real3 pos2 = localData[atom2].pos; real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z); #ifdef USE_PERIODIC delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = atomIndices[tbx+tj]; real dEdR = 0; real tempEnergy = 0; if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) { COMPUTE_INTERACTION dEdR /= -r; } energy += tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; atom2 = tbx+tj; localData[atom2].force.x += delta.x; localData[atom2].force.y += delta.y; localData[atom2].force.z += delta.z; RECORD_DERIVATIVE_2 #ifdef USE_CUTOFF } #endif tj = (tj + 1) & (TILE_SIZE - 1); } } // Write results. atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (force.x*0x100000000))); atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000))); atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000))); unsigned int offset = atom1; STORE_DERIVATIVES_1 #ifdef USE_CUTOFF unsigned int atom2 = atomIndices[threadIdx.x]; #else unsigned int atom2 = y*TILE_SIZE + tgx; #endif if (atom2 < PADDED_NUM_ATOMS) { atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000))); atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000))); atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000))); offset = atom2; STORE_DERIVATIVES_2 } } pos++; } energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy; }
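Both the HIP and CUDA versions of this kernel accumulate forces by scaling each component by 0x100000000 (2^32), truncating to a 64-bit integer, and atomicAdd-ing into an unsigned long long buffer; because integer addition is associative, the result does not depend on the order in which the atomics land. A small standalone sketch of that fixed-point accumulate/decode round trip (not OpenMM's actual buffer layout):

#include <cuda_runtime.h>

// Accumulate a float into a 64-bit fixed-point slot with 32 fractional bits,
// mirroring the forceBuffers/derivBuffers updates above.
__device__ void accumulateFixed(unsigned long long *slot, float v)
{
    atomicAdd(slot, static_cast<unsigned long long>((long long) (v * 0x100000000)));
}

// Convert an accumulated fixed-point value back to floating point.
__host__ __device__ float decodeFixed(unsigned long long fixed)
{
    return (float) ((long long) fixed) / (float) 0x100000000;
}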
6a686d77033d98b7629f00aa21f87491341521ec.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "reduction.h"

/*
    Parallel sum reduction using shared memory
    - takes log(n) steps for n input elements
    - uses n threads
    - only works for power-of-2 arrays
*/

// cuda thread synchronization
__global__ void reduction_kernel_1(float* g_out, float* g_in, unsigned int size)
{
    unsigned int idx_x = blockIdx.x * blockDim.x + threadIdx.x;

    extern __shared__ float s_data[];
    s_data[threadIdx.x] = (idx_x < size) ? g_in[idx_x] : 0.f;
    __syncthreads();

    // do reduction
    // interleaved addressing
    for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
        int index = 2 * stride * threadIdx.x;
        if (index < blockDim.x)
            s_data[index] += s_data[index + stride];
        __syncthreads();
    }

    if (threadIdx.x == 0)
        g_out[blockIdx.x] = s_data[0];
}

int reduction(float *g_outPtr, float *g_inPtr, int size, int n_threads)
{
    int n_blocks = (size + n_threads - 1) / n_threads;
    hipLaunchKernelGGL(( reduction_kernel_1), dim3(n_blocks), dim3(n_threads), n_threads * sizeof(float), 0,
                       g_outPtr, g_inPtr, size);
    return n_blocks;
}
6a686d77033d98b7629f00aa21f87491341521ec.cu
#include <stdio.h>
#include "reduction.h"

/*
    Parallel sum reduction using shared memory
    - takes log(n) steps for n input elements
    - uses n threads
    - only works for power-of-2 arrays
*/

// cuda thread synchronization
__global__ void reduction_kernel_1(float* g_out, float* g_in, unsigned int size)
{
    unsigned int idx_x = blockIdx.x * blockDim.x + threadIdx.x;

    extern __shared__ float s_data[];
    s_data[threadIdx.x] = (idx_x < size) ? g_in[idx_x] : 0.f;
    __syncthreads();

    // do reduction
    // interleaved addressing
    for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
        int index = 2 * stride * threadIdx.x;
        if (index < blockDim.x)
            s_data[index] += s_data[index + stride];
        __syncthreads();
    }

    if (threadIdx.x == 0)
        g_out[blockIdx.x] = s_data[0];
}

int reduction(float *g_outPtr, float *g_inPtr, int size, int n_threads)
{
    int n_blocks = (size + n_threads - 1) / n_threads;
    reduction_kernel_1<<< n_blocks, n_threads, n_threads * sizeof(float), 0 >>>(g_outPtr, g_inPtr, size);
    return n_blocks;
}
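reduction() above collapses each block of n_threads inputs into one partial sum and returns the number of partial sums, so reducing a large array to a single value takes repeated passes. A possible host-side driver against that signature is sketched below; the two scratch buffers and the helper name are assumptions, not part of reduction.h, and n_threads should be a power of two per the comment in the kernel:

#include <cuda_runtime.h>
#include "reduction.h"   // declares reduction(float*, float*, int, int)

// Repeatedly apply the block-wise reduction until one value remains,
// ping-ponging between two device scratch buffers so no pass has different
// blocks reading and writing the same locations.
float reduce_to_scalar(float *d_in, float *d_a, float *d_b, int size, int n_threads)
{
    int n = reduction(d_a, d_in, size, n_threads);   // first pass: d_in -> d_a
    float *src = d_a, *dst = d_b;
    while (n > 1) {
        n = reduction(dst, src, n, n_threads);
        float *tmp = src; src = dst; dst = tmp;      // swap for the next pass
    }
    float result = 0.f;
    cudaMemcpy(&result, src, sizeof(float), cudaMemcpyDeviceToHost);
    return result;
}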
bd56a3af626d4a57fad613f2836defe34a3f4413.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"

namespace StreamCompaction {
    namespace Thrust {
        using StreamCompaction::Common::PerformanceTimer;
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }
        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scan(int n, int *odata, const int *idata) {
            timer().startGpuTimer();
            // TODO use `thrust::exclusive_scan`
            // example: for device_vectors dv_in and dv_out:
            // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
            thrust::device_vector<int> dev_in(idata, idata+n);
            thrust::device_vector<int> dev_op(odata, odata+n);
            thrust::exclusive_scan(dev_in.begin(), dev_in.end(), dev_op.begin());
            timer().endGpuTimer();
            thrust::copy(dev_op.begin(), dev_op.end(), odata);
        }
    }
}
bd56a3af626d4a57fad613f2836defe34a3f4413.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"

namespace StreamCompaction {
    namespace Thrust {
        using StreamCompaction::Common::PerformanceTimer;
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }
        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scan(int n, int *odata, const int *idata) {
            timer().startGpuTimer();
            // TODO use `thrust::exclusive_scan`
            // example: for device_vectors dv_in and dv_out:
            // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
            thrust::device_vector<int> dev_in(idata, idata+n);
            thrust::device_vector<int> dev_op(odata, odata+n);
            thrust::exclusive_scan(dev_in.begin(), dev_in.end(), dev_op.begin());
            timer().endGpuTimer();
            thrust::copy(dev_op.begin(), dev_op.end(), odata);
        }
    }
}
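A small host-side usage sketch for the scan wrapper above (input values are arbitrary; note that the wrapper constructs its device_vectors, including a copy of the still-uninitialised odata, inside the timed region, so the reported GPU time also covers those transfers):

#include <cstdio>
#include "thrust.h"   // declares StreamCompaction::Thrust::scan

int main()
{
    const int n = 8;
    int idata[n] = {3, 1, 7, 0, 4, 1, 6, 3};
    int odata[n] = {0};

    StreamCompaction::Thrust::scan(n, odata, idata);

    // Exclusive scan: odata[0] = 0, odata[i] = sum of idata[0..i-1]
    for (int i = 0; i < n; i++)
        std::printf("%d ", odata[i]);   // expected: 0 3 4 11 11 15 16 22
    std::printf("\n");
    return 0;
}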
c35050224a8635f1af2d1aaf74b30954db3ea26d.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #include "internal_shared.hpp" #include "opencv2/gpu/device/transform.hpp" #include "opencv2/gpu/device/color.hpp" using namespace cv::gpu; using namespace cv::gpu::device; namespace cv { namespace gpu { namespace color { #define OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, traits) \ void name(const DevMem2D& src, const DevMem2D& dst, hipStream_t stream) \ { \ traits::functor_type functor = traits::create_functor(); \ typedef typename traits::functor_type::argument_type src_t; \ typedef typename traits::functor_type::result_type dst_t; \ transform((DevMem2D_<src_t>)src, (DevMem2D_<dst_t>)dst, functor, stream); \ } #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(name) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, name ## _traits) #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(name) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _16u, name ## _traits<ushort>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(name) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_32f, name ## _full_traits<float>) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgra) 
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_bgra) #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F }}}
c35050224a8635f1af2d1aaf74b30954db3ea26d.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #include "internal_shared.hpp" #include "opencv2/gpu/device/transform.hpp" #include "opencv2/gpu/device/color.hpp" using namespace cv::gpu; using namespace cv::gpu::device; namespace cv { namespace gpu { namespace color { #define OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, traits) \ void name(const DevMem2D& src, const DevMem2D& dst, cudaStream_t stream) \ { \ traits::functor_type functor = traits::create_functor(); \ typedef typename traits::functor_type::argument_type src_t; \ typedef typename traits::functor_type::result_type dst_t; \ transform((DevMem2D_<src_t>)src, (DevMem2D_<dst_t>)dst, functor, stream); \ } #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(name) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, name ## _traits) #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(name) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _16u, name ## _traits<ushort>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(name) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_32f, name ## _full_traits<float>) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgra) 
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hsv4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(hls4_to_bgra) #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F }}}
e8b1d18fdbd43eb84273616fba8cc7958bb5de0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: [email protected]) 2018-04-24 */ #include "../../XDevice.h" #include "../../XTensor.h" #include "Multiply.h" #include "Multiply.cuh" namespace nts { // namespace nts(NiuTrans.Tensor) #ifdef USE_ROCM /* multiplication of data arrays in a element-wise manner c(i) = a(i)*b(i) >> a - data array a >> b - data array b >> c - result data array >> size - size of c */ __global__ void KernelMulElementWise(DTYPE * a, DTYPE * b, DTYPE * c, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < size) c[i] = a[i] * b[i]; } /* multiplication of data arrays in a element-wise manner c(i) = a(i)*b(i) + \alpha*c(i) >> a - data array a >> b - data array b >> c - result data array >> size - size of c >> alpha - the coefficient */ __global__ void KernelMulElementWiseV2(DTYPE * a, DTYPE * b, DTYPE * c, int size, DTYPE alpha) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < size) c[i] = a[i] * b[i] + alpha * c[i]; } /* multiplication of two tensors in a element-wise manner c(i) = a(i)*b(i). Note that a and b can be of different sizes here, i.e., |a_lead| <= |c_lead| and |b_lead| <= |c_lead| where |a_lead| means the size of the leading dimension of a >> a - tensor a >> b - tensor b >> c - result tensor >> alpha - the coefficient >> stride - the number of items we go over when move next along the leading dimension in a block >> ldSizeA - size of the leading dimension of a >> ldSizeB - size of the leading dimension of b >> ldSizeC - size of the leading dimension of c >> blockNum - number of blocks */ template<int nonZeroAlpha> __global__ void KernelMulElementWiseTensorDynamic(DTYPE * a, DTYPE * b, DTYPE * c, DTYPE alpha, int stride, int ldSizeA, int ldSizeB, int ldSizeC, int blockNum) { __shared__ DTYPE* ap[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ DTYPE* bp[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ DTYPE* cp[MAX_CUDA_THREAD_NUM_PER_BLOCK]; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= blockNum * stride || j >= ldSizeC) return; if (threadIdx.y == 0) { int block = i / stride; int size = block * stride; ap[threadIdx.x] = a + size * ldSizeA; bp[threadIdx.x] = b + size * ldSizeB; cp[threadIdx.x] = c + size * ldSizeC; } __syncthreads(); int aj = j >= ldSizeA ? j % ldSizeA : j; int bj = j >= ldSizeB ? 
j % ldSizeB : j; int offseti = i % stride; if (nonZeroAlpha == 0) cp[threadIdx.x][j * ldSizeC + offseti] = ap[threadIdx.x][aj * ldSizeA + offseti] * bp[threadIdx.x][bj * ldSizeB + offseti]; else cp[threadIdx.x][j * ldSizeC + offseti] = ap[threadIdx.x][aj * ldSizeA + offseti] * bp[threadIdx.x][bj * ldSizeB + offseti] + alpha * cp[threadIdx.x][j * ldSizeC + offseti]; } /* element-wise product of two tensors c(i) = a(i)*b(i) + \alpha * c(i) where i is the item index >> a - tensor a >> b - tensor b >> c - result tensor >> alpha - the coefficient >> leadingDim - dimension along which we perform broadcasting */ void _CudaMultiply(const XTensor * a, const XTensor * b, XTensor * c, DTYPE alpha, int leadingDim) { CheckNTErrors((a->unitNum <= c->unitNum && b->unitNum <= c->unitNum), "Unmatched tensors in multiplication!"); CheckNTErrors((a->order == b->order && a->order == c->order), "Unmatched tensors!"); int stride = 1; int blockSizeA = 1; int blockNum = 1; int dimensionSizeA = a->dimSize[leadingDim]; int dimensionSizeB = b->dimSize[leadingDim]; int dimensionSizeC = c->dimSize[leadingDim]; for (int i = 0; i < a->order; i++) { if (i != leadingDim) { CheckNTErrors((a->dimSize[i] == b->dimSize[i] && a->dimSize[i] == c->dimSize[i]), "Unmatched tensors!"); } if (i > leadingDim) stride *= a->dimSize[i]; } blockSizeA = stride * dimensionSizeA; blockNum = a->unitNum / blockSizeA; int devIDBackup; ProtectCudaDev(a->devID, devIDBackup); if (!a->isSparse && !b->isSparse) { if (a->dataType == DEFAULT_DTYPE && b->dataType == DEFAULT_DTYPE) { int cudaGridSize[3]; int cudaBlockSize[3]; if (a->unitNum == c->unitNum && b->unitNum == c->unitNum) { GDevs.GetCudaThread(a->devID, c->unitNum, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0]), threads(cudaBlockSize[0]); if (alpha == 0) KernelMulElementWise << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, c->unitNum); else KernelMulElementWiseV2 << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, c->unitNum, alpha); } else { GDevs.GetCudaThread2D(c->devID, stride * blockNum, dimensionSizeC, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); if (alpha == 0) { KernelMulElementWiseTensorDynamic<0> << <blocks, threads >> > ((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, 0, stride, dimensionSizeA, dimensionSizeB, dimensionSizeC, blockNum); } else { KernelMulElementWiseTensorDynamic<1> << <blocks, threads >> > ((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, alpha, stride, dimensionSizeA, dimensionSizeB, dimensionSizeC, blockNum); } } } else { // TODO!! ShowNTErrors("TODO!"); } } else { // TODO!! ShowNTErrors("TODO!"); } BacktoCudaDev(a->devID, devIDBackup); } #endif // USE_ROCM } // namespace nts(NiuTrans.Tensor)
e8b1d18fdbd43eb84273616fba8cc7958bb5de0d.cu
/* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: [email protected]) 2018-04-24 */ #include "../../XDevice.h" #include "../../XTensor.h" #include "Multiply.h" #include "Multiply.cuh" namespace nts { // namespace nts(NiuTrans.Tensor) #ifdef USE_CUDA /* multiplication of data arrays in a element-wise manner c(i) = a(i)*b(i) >> a - data array a >> b - data array b >> c - result data array >> size - size of c */ __global__ void KernelMulElementWise(DTYPE * a, DTYPE * b, DTYPE * c, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < size) c[i] = a[i] * b[i]; } /* multiplication of data arrays in a element-wise manner c(i) = a(i)*b(i) + \alpha*c(i) >> a - data array a >> b - data array b >> c - result data array >> size - size of c >> alpha - the coefficient */ __global__ void KernelMulElementWiseV2(DTYPE * a, DTYPE * b, DTYPE * c, int size, DTYPE alpha) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < size) c[i] = a[i] * b[i] + alpha * c[i]; } /* multiplication of two tensors in a element-wise manner c(i) = a(i)*b(i). Note that a and b can be of different sizes here, i.e., |a_lead| <= |c_lead| and |b_lead| <= |c_lead| where |a_lead| means the size of the leading dimension of a >> a - tensor a >> b - tensor b >> c - result tensor >> alpha - the coefficient >> stride - the number of items we go over when move next along the leading dimension in a block >> ldSizeA - size of the leading dimension of a >> ldSizeB - size of the leading dimension of b >> ldSizeC - size of the leading dimension of c >> blockNum - number of blocks */ template<int nonZeroAlpha> __global__ void KernelMulElementWiseTensorDynamic(DTYPE * a, DTYPE * b, DTYPE * c, DTYPE alpha, int stride, int ldSizeA, int ldSizeB, int ldSizeC, int blockNum) { __shared__ DTYPE* ap[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ DTYPE* bp[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ DTYPE* cp[MAX_CUDA_THREAD_NUM_PER_BLOCK]; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= blockNum * stride || j >= ldSizeC) return; if (threadIdx.y == 0) { int block = i / stride; int size = block * stride; ap[threadIdx.x] = a + size * ldSizeA; bp[threadIdx.x] = b + size * ldSizeB; cp[threadIdx.x] = c + size * ldSizeC; } __syncthreads(); int aj = j >= ldSizeA ? j % ldSizeA : j; int bj = j >= ldSizeB ? 
j % ldSizeB : j; int offseti = i % stride; if (nonZeroAlpha == 0) cp[threadIdx.x][j * ldSizeC + offseti] = ap[threadIdx.x][aj * ldSizeA + offseti] * bp[threadIdx.x][bj * ldSizeB + offseti]; else cp[threadIdx.x][j * ldSizeC + offseti] = ap[threadIdx.x][aj * ldSizeA + offseti] * bp[threadIdx.x][bj * ldSizeB + offseti] + alpha * cp[threadIdx.x][j * ldSizeC + offseti]; } /* element-wise product of two tensors c(i) = a(i)*b(i) + \alpha * c(i) where i is the item index >> a - tensor a >> b - tensor b >> c - result tensor >> alpha - the coefficient >> leadingDim - dimension along which we perform broadcasting */ void _CudaMultiply(const XTensor * a, const XTensor * b, XTensor * c, DTYPE alpha, int leadingDim) { CheckNTErrors((a->unitNum <= c->unitNum && b->unitNum <= c->unitNum), "Unmatched tensors in multiplication!"); CheckNTErrors((a->order == b->order && a->order == c->order), "Unmatched tensors!"); int stride = 1; int blockSizeA = 1; int blockNum = 1; int dimensionSizeA = a->dimSize[leadingDim]; int dimensionSizeB = b->dimSize[leadingDim]; int dimensionSizeC = c->dimSize[leadingDim]; for (int i = 0; i < a->order; i++) { if (i != leadingDim) { CheckNTErrors((a->dimSize[i] == b->dimSize[i] && a->dimSize[i] == c->dimSize[i]), "Unmatched tensors!"); } if (i > leadingDim) stride *= a->dimSize[i]; } blockSizeA = stride * dimensionSizeA; blockNum = a->unitNum / blockSizeA; int devIDBackup; ProtectCudaDev(a->devID, devIDBackup); if (!a->isSparse && !b->isSparse) { if (a->dataType == DEFAULT_DTYPE && b->dataType == DEFAULT_DTYPE) { int cudaGridSize[3]; int cudaBlockSize[3]; if (a->unitNum == c->unitNum && b->unitNum == c->unitNum) { GDevs.GetCudaThread(a->devID, c->unitNum, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0]), threads(cudaBlockSize[0]); if (alpha == 0) KernelMulElementWise << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, c->unitNum); else KernelMulElementWiseV2 << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, c->unitNum, alpha); } else { GDevs.GetCudaThread2D(c->devID, stride * blockNum, dimensionSizeC, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); if (alpha == 0) { KernelMulElementWiseTensorDynamic<0> << <blocks, threads >> > ((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, 0, stride, dimensionSizeA, dimensionSizeB, dimensionSizeC, blockNum); } else { KernelMulElementWiseTensorDynamic<1> << <blocks, threads >> > ((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, alpha, stride, dimensionSizeA, dimensionSizeB, dimensionSizeC, blockNum); } } } else { // TODO!! ShowNTErrors("TODO!"); } } else { // TODO!! ShowNTErrors("TODO!"); } BacktoCudaDev(a->devID, devIDBackup); } #endif // USE_CUDA } // namespace nts(NiuTrans.Tensor)
4fd8d2f1fa371fa3d5ba6d2af1545575e7d044aa.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "uplo_sqrt.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  hipSetDevice(0);
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      const int sd = 1;
      const int unit = 1;
      const int bottom = 1;
      const REAL *a = NULL;
      hipMalloc(&a, XSIZE*YSIZE);
      const int offset_a = 1;
      const int ld_a = 1;
      REAL *b = NULL;
      hipMalloc(&b, XSIZE*YSIZE);
      const int offset_b = 1;
      const int ld_b = 1;
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
      while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);
      hipLaunchKernelGGL(( uplo_sqrt), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
      hipDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL(( uplo_sqrt), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(( uplo_sqrt), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
    }
  }
}
4fd8d2f1fa371fa3d5ba6d2af1545575e7d044aa.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "uplo_sqrt.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  cudaSetDevice(0);
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      const int sd = 1;
      const int unit = 1;
      const int bottom = 1;
      const REAL *a = NULL;
      cudaMalloc(&a, XSIZE*YSIZE);
      const int offset_a = 1;
      const int ld_a = 1;
      REAL *b = NULL;
      cudaMalloc(&b, XSIZE*YSIZE);
      const int offset_b = 1;
      const int ld_b = 1;
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
      while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);
      uplo_sqrt<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
      cudaDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        uplo_sqrt<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        uplo_sqrt<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
    }
  }
}
e06f7c8117213c698231dc8feb9fdcae11d21ed6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdio.h> #include <stdlib.h> #include <math.h> #include "complex.cuh" #include "color.cuh" #include "FreeImage.h" #define WIDTH 1280 #define HEIGHT 720 #define BPP 24 #define N WIDTH*HEIGHT #define ESCAPE_RADIUS 1000.0 #define ANTIALIAS 3 void save_img(int idx, Color *c_arr) { char filename[20]; sprintf(filename, "images/%05d.png", idx); FIBITMAP *bitmap = FreeImage_Allocate(WIDTH, HEIGHT, BPP); RGBQUAD color; if (!bitmap) { std::cout << "Failed to allocate bitmap. Exiting!" << std::endl; exit(1); } for (int y=0; y<HEIGHT; y++) { for (int x=0; x<WIDTH; x++) { int i = y*WIDTH + x; color.rgbRed = c_arr[i].r * 255.0; color.rgbGreen = c_arr[i].g * 255.0; color.rgbBlue = c_arr[i].b * 255.0; FreeImage_SetPixelColor(bitmap, x, y, &color); } } if (FreeImage_Save(FIF_PNG, bitmap, filename, 0)) { std::cout << "successfully saved " << filename << std::endl; } FreeImage_Unload(bitmap); } __device__ Complex mandelIterate(Complex z, Complex c) { return z*z + c; } #define sin60 0.86602540378443871 #define cos60 0.5 #define tan30 0.57735026918962573 #define transform(cp, a, b, c, d) Complex(a*cp.re + b*cp.im, c*cp.re + d*cp.im) __device__ Complex sierpinskiIterate(Complex z) { float scale = 2.0; z.re = abs(z.re); if (z.im < tan30 * z.re) { z = transform(z, cos60, sin60, sin60, -cos60); } z.re = z.re * scale; z.im = z.im * scale - (scale-1.0); return z; } __device__ Color getCol(Complex p, double time) { Complex z = p; float minDist = 2.0*ESCAPE_RADIUS; double iterations = -1.; for (int i=0; i<1024; i++) { if (i%2==0) { z = sierpinskiIterate(z); } else { z = mandelIterate(z, p); } double azr = abs(z.re); if (azr < 0.1) { double a = 5.0*sin(2.0*z.im+1.0*time); double trap = azr / (1.0 + 4.0*exp(-a*a)); minDist = min(trap, minDist); } if (lengthsquared(z) > ESCAPE_RADIUS*ESCAPE_RADIUS) { iterations = (double) i; if (i%2==0) { iterations = iterations + 2.0 - log(length(z)) / log(2.0) + log(ESCAPE_RADIUS) / log(2.0); } else { iterations = iterations + 2.0 - log(log(length(z))) / log(2.0) + log(log(ESCAPE_RADIUS)) / log(2.0); } break; } } Color col; if (iterations >= 0.0) { float a = pow(iterations/50, 0.5) + 0.3; col = gradient(a); } else { col = Color(0.3, 0.1, 0.1) + Color(2.5, 1.0, 1.0) / (1.+pow(2000.*minDist, 1.5)); } if (col.r > 1.0) col.r = 1.0; if (col.g > 1.0) col.g = 1.0; if (col.b > 1.0) col.b = 1.0; return col; } __global__ void render(double cx, double cy, double zoom, Color *col_arr, double time) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= ANTIALIAS*ANTIALIAS*N) { return; } double x = index % (WIDTH*ANTIALIAS); double y = index / (WIDTH*ANTIALIAS); double nx = (2.0*x - WIDTH*ANTIALIAS) / (HEIGHT*ANTIALIAS); double ny = (2.0*y - HEIGHT*ANTIALIAS) / (HEIGHT*ANTIALIAS); Complex p(cx + zoom * nx, cy + zoom * ny); col_arr[index] = getCol(p, time); } __global__ void downscale(Color *col_arr, Color *img_arr) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= N) { return; } int x = index % WIDTH; int y = index / WIDTH; Color c(0.0, 0.0, 0.0); for (int dx = 0; dx < ANTIALIAS; dx++) { for (int dy = 0; dy < ANTIALIAS; dy++) { int cx = x * ANTIALIAS + dx; int cy = y * ANTIALIAS + dy; c = c + col_arr[cy*WIDTH*ANTIALIAS + cx]; } } c = c / (ANTIALIAS*ANTIALIAS); img_arr[index] = c; } /*void setupInputs(double cx, double cy, double zoom, Complex *p_arr) { for (int y=0; y<HEIGHT*ANTIALIAS; y++) { for (int x=0; x<WIDTH*ANTIALIAS; x++) { int i = y*WIDTH*ANTIALIAS 
+ x; double nx = (2.0*x - WIDTH*ANTIALIAS) / (HEIGHT*ANTIALIAS); double ny = (2.0*y - HEIGHT*ANTIALIAS) / (HEIGHT*ANTIALIAS); p_arr[i] = Complex(cx + zoom * nx, cy + zoom * ny); } } }*/ void processFrame(Color *col_arr, Color *img_arr, double cx, double cy, double zoom, double time) { //setupInputs(cx, cy, zoom, p_arr); int blockSize = 256; int numBlocks; numBlocks = (ANTIALIAS*ANTIALIAS*N + blockSize - 1) / blockSize; hipLaunchKernelGGL(( render), dim3(numBlocks), dim3(blockSize), 0, 0, cx, cy, zoom, col_arr, time); hipDeviceSynchronize(); numBlocks = (N + blockSize - 1) / blockSize; hipLaunchKernelGGL(( downscale), dim3(numBlocks), dim3(blockSize), 0, 0, col_arr, img_arr); hipDeviceSynchronize(); } struct KeyFrame { int frame; double cx; double cy; double zoom; KeyFrame(int _frame, double _cx, double _cy, double _zoom) { frame = _frame; cx = _cx; cy = _cy; zoom = _zoom; } }; void animate(Color *col_arr, Color *img_arr, KeyFrame *animation, int keyFrames) { int frameIdx = 0; int keyFrameIdx = 0; while (keyFrameIdx < keyFrames-1) { KeyFrame kf1 = animation[keyFrameIdx]; KeyFrame kf2 = animation[keyFrameIdx+1]; double a = (double)(frameIdx - kf1.frame) / (double)(kf2.frame - kf1.frame); double cx = kf1.cx * (1.0-a) + kf2.cx * a; double cy = kf1.cy * (1.0-a) + kf2.cy * a; double logz1 = log(kf1.zoom); double logz2 = log(kf2.zoom); double logz = logz1 * (1.0-a) + logz2 * a; double zoom = exp(logz); processFrame(col_arr, img_arr, cx, cy, zoom, frameIdx/30.0); save_img(frameIdx, img_arr); frameIdx++; if (frameIdx >= animation[keyFrameIdx+1].frame) { keyFrameIdx++; } } } int main(int argc, char **argv) { FreeImage_Initialise(); //Complex *p_arr; Color *col_arr; Color *img_arr; //hipMallocManaged(&p_arr, ANTIALIAS*ANTIALIAS*N*sizeof(Complex)); hipMallocManaged(&col_arr, ANTIALIAS*ANTIALIAS*N*sizeof(Color)); hipMallocManaged(&img_arr, N*sizeof(Color)); add_gradient_color(0.0, 0.3, 1.0); add_gradient_color(1.0, 1.0, 1.0); add_gradient_color(1.0, 0.6, 0.0); add_gradient_color(0.0, 0.0, 0.2); load_gradient(); if (argc < 4) { std::cout << "needs at least 3 arguments" << std::endl; exit(0); } double cx = atof(argv[1]); double cy = atof(argv[2]); double zoom = atof(argv[3]); int id = 1; if (argc >= 5) { id = atoi(argv[4]); } processFrame(col_arr, img_arr, cx, cy, zoom, 1.0); save_img(id, img_arr); KeyFrame animation[] = { KeyFrame(0, 0.10034702602, 0.10016028923, 0.0000000001), KeyFrame(100, 0.10034702602, 0.10016028923, 0.0000000001), KeyFrame(4970, 0.10034702602, 0.10016028923, 0.75), KeyFrame(5000, 0.0, 0.0, 0.75), KeyFrame(5800, 0.0, 0.0, 0.75), KeyFrame(5830, -0.74507300650, 0.10275605064, 0.75), KeyFrame(10700, -0.74507300650, 0.10275605064, 0.0000000001), KeyFrame(10800, -0.74507300650, 0.10275605064, 0.0000000001), //KeyFrame(30, -.87591, .20464, 0.25) }; //animate(col_arr, img_arr, animation, sizeof(animation)/sizeof(animation[0])); //hipFree(p_arr); hipFree(col_arr); hipFree(img_arr); FreeImage_DeInitialise(); return 0; }
e06f7c8117213c698231dc8feb9fdcae11d21ed6.cu
#include <iostream> #include <stdio.h> #include <stdlib.h> #include <math.h> #include "complex.cuh" #include "color.cuh" #include "FreeImage.h" #define WIDTH 1280 #define HEIGHT 720 #define BPP 24 #define N WIDTH*HEIGHT #define ESCAPE_RADIUS 1000.0 #define ANTIALIAS 3 void save_img(int idx, Color *c_arr) { char filename[20]; sprintf(filename, "images/%05d.png", idx); FIBITMAP *bitmap = FreeImage_Allocate(WIDTH, HEIGHT, BPP); RGBQUAD color; if (!bitmap) { std::cout << "Failed to allocate bitmap. Exiting!" << std::endl; exit(1); } for (int y=0; y<HEIGHT; y++) { for (int x=0; x<WIDTH; x++) { int i = y*WIDTH + x; color.rgbRed = c_arr[i].r * 255.0; color.rgbGreen = c_arr[i].g * 255.0; color.rgbBlue = c_arr[i].b * 255.0; FreeImage_SetPixelColor(bitmap, x, y, &color); } } if (FreeImage_Save(FIF_PNG, bitmap, filename, 0)) { std::cout << "successfully saved " << filename << std::endl; } FreeImage_Unload(bitmap); } __device__ Complex mandelIterate(Complex z, Complex c) { return z*z + c; } #define sin60 0.86602540378443871 #define cos60 0.5 #define tan30 0.57735026918962573 #define transform(cp, a, b, c, d) Complex(a*cp.re + b*cp.im, c*cp.re + d*cp.im) __device__ Complex sierpinskiIterate(Complex z) { float scale = 2.0; z.re = abs(z.re); if (z.im < tan30 * z.re) { z = transform(z, cos60, sin60, sin60, -cos60); } z.re = z.re * scale; z.im = z.im * scale - (scale-1.0); return z; } __device__ Color getCol(Complex p, double time) { Complex z = p; float minDist = 2.0*ESCAPE_RADIUS; double iterations = -1.; for (int i=0; i<1024; i++) { if (i%2==0) { z = sierpinskiIterate(z); } else { z = mandelIterate(z, p); } double azr = abs(z.re); if (azr < 0.1) { double a = 5.0*sin(2.0*z.im+1.0*time); double trap = azr / (1.0 + 4.0*exp(-a*a)); minDist = min(trap, minDist); } if (lengthsquared(z) > ESCAPE_RADIUS*ESCAPE_RADIUS) { iterations = (double) i; if (i%2==0) { iterations = iterations + 2.0 - log(length(z)) / log(2.0) + log(ESCAPE_RADIUS) / log(2.0); } else { iterations = iterations + 2.0 - log(log(length(z))) / log(2.0) + log(log(ESCAPE_RADIUS)) / log(2.0); } break; } } Color col; if (iterations >= 0.0) { float a = pow(iterations/50, 0.5) + 0.3; col = gradient(a); } else { col = Color(0.3, 0.1, 0.1) + Color(2.5, 1.0, 1.0) / (1.+pow(2000.*minDist, 1.5)); } if (col.r > 1.0) col.r = 1.0; if (col.g > 1.0) col.g = 1.0; if (col.b > 1.0) col.b = 1.0; return col; } __global__ void render(double cx, double cy, double zoom, Color *col_arr, double time) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= ANTIALIAS*ANTIALIAS*N) { return; } double x = index % (WIDTH*ANTIALIAS); double y = index / (WIDTH*ANTIALIAS); double nx = (2.0*x - WIDTH*ANTIALIAS) / (HEIGHT*ANTIALIAS); double ny = (2.0*y - HEIGHT*ANTIALIAS) / (HEIGHT*ANTIALIAS); Complex p(cx + zoom * nx, cy + zoom * ny); col_arr[index] = getCol(p, time); } __global__ void downscale(Color *col_arr, Color *img_arr) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= N) { return; } int x = index % WIDTH; int y = index / WIDTH; Color c(0.0, 0.0, 0.0); for (int dx = 0; dx < ANTIALIAS; dx++) { for (int dy = 0; dy < ANTIALIAS; dy++) { int cx = x * ANTIALIAS + dx; int cy = y * ANTIALIAS + dy; c = c + col_arr[cy*WIDTH*ANTIALIAS + cx]; } } c = c / (ANTIALIAS*ANTIALIAS); img_arr[index] = c; } /*void setupInputs(double cx, double cy, double zoom, Complex *p_arr) { for (int y=0; y<HEIGHT*ANTIALIAS; y++) { for (int x=0; x<WIDTH*ANTIALIAS; x++) { int i = y*WIDTH*ANTIALIAS + x; double nx = (2.0*x - WIDTH*ANTIALIAS) / (HEIGHT*ANTIALIAS); double ny = (2.0*y - 
HEIGHT*ANTIALIAS) / (HEIGHT*ANTIALIAS); p_arr[i] = Complex(cx + zoom * nx, cy + zoom * ny); } } }*/ void processFrame(Color *col_arr, Color *img_arr, double cx, double cy, double zoom, double time) { //setupInputs(cx, cy, zoom, p_arr); int blockSize = 256; int numBlocks; numBlocks = (ANTIALIAS*ANTIALIAS*N + blockSize - 1) / blockSize; render<<<numBlocks, blockSize>>>(cx, cy, zoom, col_arr, time); cudaDeviceSynchronize(); numBlocks = (N + blockSize - 1) / blockSize; downscale<<<numBlocks, blockSize>>>(col_arr, img_arr); cudaDeviceSynchronize(); } struct KeyFrame { int frame; double cx; double cy; double zoom; KeyFrame(int _frame, double _cx, double _cy, double _zoom) { frame = _frame; cx = _cx; cy = _cy; zoom = _zoom; } }; void animate(Color *col_arr, Color *img_arr, KeyFrame *animation, int keyFrames) { int frameIdx = 0; int keyFrameIdx = 0; while (keyFrameIdx < keyFrames-1) { KeyFrame kf1 = animation[keyFrameIdx]; KeyFrame kf2 = animation[keyFrameIdx+1]; double a = (double)(frameIdx - kf1.frame) / (double)(kf2.frame - kf1.frame); double cx = kf1.cx * (1.0-a) + kf2.cx * a; double cy = kf1.cy * (1.0-a) + kf2.cy * a; double logz1 = log(kf1.zoom); double logz2 = log(kf2.zoom); double logz = logz1 * (1.0-a) + logz2 * a; double zoom = exp(logz); processFrame(col_arr, img_arr, cx, cy, zoom, frameIdx/30.0); save_img(frameIdx, img_arr); frameIdx++; if (frameIdx >= animation[keyFrameIdx+1].frame) { keyFrameIdx++; } } } int main(int argc, char **argv) { FreeImage_Initialise(); //Complex *p_arr; Color *col_arr; Color *img_arr; //cudaMallocManaged(&p_arr, ANTIALIAS*ANTIALIAS*N*sizeof(Complex)); cudaMallocManaged(&col_arr, ANTIALIAS*ANTIALIAS*N*sizeof(Color)); cudaMallocManaged(&img_arr, N*sizeof(Color)); add_gradient_color(0.0, 0.3, 1.0); add_gradient_color(1.0, 1.0, 1.0); add_gradient_color(1.0, 0.6, 0.0); add_gradient_color(0.0, 0.0, 0.2); load_gradient(); if (argc < 4) { std::cout << "needs at least 3 arguments" << std::endl; exit(0); } double cx = atof(argv[1]); double cy = atof(argv[2]); double zoom = atof(argv[3]); int id = 1; if (argc >= 5) { id = atoi(argv[4]); } processFrame(col_arr, img_arr, cx, cy, zoom, 1.0); save_img(id, img_arr); KeyFrame animation[] = { KeyFrame(0, 0.10034702602, 0.10016028923, 0.0000000001), KeyFrame(100, 0.10034702602, 0.10016028923, 0.0000000001), KeyFrame(4970, 0.10034702602, 0.10016028923, 0.75), KeyFrame(5000, 0.0, 0.0, 0.75), KeyFrame(5800, 0.0, 0.0, 0.75), KeyFrame(5830, -0.74507300650, 0.10275605064, 0.75), KeyFrame(10700, -0.74507300650, 0.10275605064, 0.0000000001), KeyFrame(10800, -0.74507300650, 0.10275605064, 0.0000000001), //KeyFrame(30, -.87591, .20464, 0.25) }; //animate(col_arr, img_arr, animation, sizeof(animation)/sizeof(animation[0])); //cudaFree(p_arr); cudaFree(col_arr); cudaFree(img_arr); FreeImage_DeInitialise(); return 0; }
fab69a3fe2605938af93c62adcf4742373ff7c90.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Contrast Correction using CUDA
#include "headerinc.h"
#define size_seek(a) ((a) == 24 ? 54:1078)
using namespace std;

BMPHEADER *_head;
DIBHEADER *_dib;

__global__ void Rgbinv(unsigned char *a, unsigned char *b, unsigned int count)
{
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < count) {
    float contrast = 50;
    float correction_factor;
    correction_factor = (259.0*(contrast + 255.0)) / (255.0*(259.0 - contrast));
    float temp = ((correction_factor * ((float)a[id] - 128.0)) + 128.0); // Contrast correction taking place here
    b[id] = temp >= 0 ? (temp <= 255 ? (unsigned char)temp : 255) : 0;
  }
}

int main()
{
  FILE *fp, *fp1;
  unsigned char *data;
  unsigned char *d_a, *d_b;
  unsigned int colordata[256];
  fp = fopen("C:/Users/cdamo/Desktop/2.bmp", "rb");
  fp1 = fopen("C:/Users/cdamo/Desktop/a2.bmp", "wb");
  _head = (BMPHEADER *)malloc(sizeof(BMPHEADER));
  fread(_head, sizeof(BMPHEADER), 1, fp);
  if (_head->marker != 19778) {
    cout << "Not a bmp file";
  }
  else {
    _dib = (DIBHEADER *)malloc(sizeof(DIBHEADER));
    fread(_dib, sizeof(DIBHEADER), 1, fp);
    if (_dib->bits_pixel == 8) {
      fread(colordata, 256 * 4, 1, fp);
    }
    fwrite(_head, 1, sizeof(BMPHEADER), fp1);
    fwrite(_dib, 1, sizeof(DIBHEADER), fp1);
    if (_dib->bits_pixel == 8)
      fwrite(colordata, 1, 256 * 4, fp1);
    fseek(fp1, size_seek(_dib->bits_pixel), SEEK_SET);
    unsigned int padded = floor((float)(_dib->bits_pixel*_dib->width_pixel + 31.0)/32.0)*4 - _dib->bits_pixel/8*_dib->width_pixel;
    DWORD pixelarray = ((_dib->bits_pixel/8*_dib->width_pixel)+padded)*_dib->height_pixel;
    data = (unsigned char *)malloc(pixelarray);
    fread(data, pixelarray, 1, fp);
    if (hipMalloc(&d_a, pixelarray) != hipSuccess) {
      cout << "Nope!";
      return 0;
    }
    if (hipMalloc(&d_b, pixelarray) != hipSuccess) {
      cout << "Nope!";
      hipFree(d_a);
      return 0;
    }
    if (hipMemcpy(d_a, data, pixelarray, hipMemcpyHostToDevice) != hipSuccess) {
      cout << "Nope!";
      hipFree(d_b);
      hipFree(d_a);
      return 0;
    }
    clock_t begin = clock();
    hipLaunchKernelGGL(( Rgbinv) , dim3(pixelarray / 256 + 1) , dim3(256), 0, 0, d_a, d_b, pixelarray);
    clock_t end = clock();
    cout << ((double)end - begin)/CLOCKS_PER_SEC << " Secs";
    if (hipMemcpy(data, d_b, pixelarray, hipMemcpyDeviceToHost) != hipSuccess) {
      cout << "Nope!";
      delete[] data;
      hipFree(d_b);
      hipFree(d_a);
      return 0;
    }
    fwrite(data, 1, pixelarray, fp1);
  }
  fclose(fp);
  fclose(fp1);
  delete[] data;
  hipFree(d_b);
  hipFree(d_a);
  return 0;
}
fab69a3fe2605938af93c62adcf4742373ff7c90.cu
//Contrast Correction using CUDA
#include "headerinc.h"
#define size_seek(a) ((a) == 24 ? 54:1078)
using namespace std;

BMPHEADER *_head;
DIBHEADER *_dib;

__global__ void Rgbinv(unsigned char *a, unsigned char *b, unsigned int count)
{
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < count) {
    float contrast = 50;
    float correction_factor;
    correction_factor = (259.0*(contrast + 255.0)) / (255.0*(259.0 - contrast));
    float temp = ((correction_factor * ((float)a[id] - 128.0)) + 128.0); // Contrast correction taking place here
    b[id] = temp >= 0 ? (temp <= 255 ? (unsigned char)temp : 255) : 0;
  }
}

int main()
{
  FILE *fp, *fp1;
  unsigned char *data;
  unsigned char *d_a, *d_b;
  unsigned int colordata[256];
  fp = fopen("C:/Users/cdamo/Desktop/2.bmp", "rb");
  fp1 = fopen("C:/Users/cdamo/Desktop/a2.bmp", "wb");
  _head = (BMPHEADER *)malloc(sizeof(BMPHEADER));
  fread(_head, sizeof(BMPHEADER), 1, fp);
  if (_head->marker != 19778) {
    cout << "Not a bmp file";
  }
  else {
    _dib = (DIBHEADER *)malloc(sizeof(DIBHEADER));
    fread(_dib, sizeof(DIBHEADER), 1, fp);
    if (_dib->bits_pixel == 8) {
      fread(colordata, 256 * 4, 1, fp);
    }
    fwrite(_head, 1, sizeof(BMPHEADER), fp1);
    fwrite(_dib, 1, sizeof(DIBHEADER), fp1);
    if (_dib->bits_pixel == 8)
      fwrite(colordata, 1, 256 * 4, fp1);
    fseek(fp1, size_seek(_dib->bits_pixel), SEEK_SET);
    unsigned int padded = floor((float)(_dib->bits_pixel*_dib->width_pixel + 31.0)/32.0)*4 - _dib->bits_pixel/8*_dib->width_pixel;
    DWORD pixelarray = ((_dib->bits_pixel/8*_dib->width_pixel)+padded)*_dib->height_pixel;
    data = (unsigned char *)malloc(pixelarray);
    fread(data, pixelarray, 1, fp);
    if (cudaMalloc(&d_a, pixelarray) != cudaSuccess) {
      cout << "Nope!";
      return 0;
    }
    if (cudaMalloc(&d_b, pixelarray) != cudaSuccess) {
      cout << "Nope!";
      cudaFree(d_a);
      return 0;
    }
    if (cudaMemcpy(d_a, data, pixelarray, cudaMemcpyHostToDevice) != cudaSuccess) {
      cout << "Nope!";
      cudaFree(d_b);
      cudaFree(d_a);
      return 0;
    }
    clock_t begin = clock();
    Rgbinv <<<pixelarray / 256 + 1 , 256>>> (d_a, d_b, pixelarray);
    clock_t end = clock();
    cout << ((double)end - begin)/CLOCKS_PER_SEC << " Secs";
    if (cudaMemcpy(data, d_b, pixelarray, cudaMemcpyDeviceToHost) != cudaSuccess) {
      cout << "Nope!";
      delete[] data;
      cudaFree(d_b);
      cudaFree(d_a);
      return 0;
    }
    fwrite(data, 1, pixelarray, fp1);
  }
  fclose(fp);
  fclose(fp1);
  delete[] data;
  cudaFree(d_b);
  cudaFree(d_a);
  return 0;
}
25ebd53798f4a513daa8ac44e37071026cca5bd5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_common.h" #include "orc_gpu.h" #include <io/utilities/block_utils.cuh> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { namespace orc { namespace gpu { struct compressed_stream_s { CompressedStreamInfo info; gpu_inflate_input_s ctl; }; // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128, 8) gpuParseCompressedStripeData( CompressedStreamInfo *strm_info, int32_t num_streams, uint32_t block_size, uint32_t log2maxcr) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s *const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (lane_id == 0) { s->info = strm_info[strm_id]; } __syncthreads(); if (strm_id < num_streams) { // Walk through the compressed blocks const uint8_t *cur = s->info.compressed_data; const uint8_t *end = cur + s->info.compressed_data_size; uint8_t *uncompressed = s->info.uncompressed_data; size_t max_uncompressed_size = 0; uint32_t num_compressed_blocks = 0; uint32_t num_uncompressed_blocks = 0; while (cur + 3 < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); uint32_t is_uncompressed = block_len & 1; uint32_t uncompressed_size; gpu_inflate_input_s *init_ctl = nullptr; block_len >>= 1; cur += 3; if (block_len > block_size || cur + block_len > end) { // Fatal num_compressed_blocks = 0; max_uncompressed_size = 0; break; } // TBD: For some codecs like snappy, it wouldn't be too difficult to get the actual // uncompressed size and avoid waste due to block size alignment For now, rely on the max // compression ratio to limit waste for the most extreme cases (small single-block streams) uncompressed_size = (is_uncompressed) ? block_len : (block_len < (block_size >> log2maxcr)) ? block_len << log2maxcr : block_size; if (is_uncompressed) { if (uncompressed_size <= 32) { // For short blocks, copy the uncompressed data to output if (uncompressed && max_uncompressed_size + uncompressed_size <= s->info.max_uncompressed_size && lane_id < uncompressed_size) { uncompressed[max_uncompressed_size + lane_id] = cur[lane_id]; } } else { init_ctl = s->info.copyctl; init_ctl = (init_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks) ? &init_ctl[num_uncompressed_blocks] : nullptr; num_uncompressed_blocks++; } } else { init_ctl = s->info.decctl; init_ctl = (init_ctl && num_compressed_blocks < s->info.num_compressed_blocks) ? 
&init_ctl[num_compressed_blocks] : nullptr; num_compressed_blocks++; } if (!lane_id && init_ctl) { s->ctl.srcDevice = const_cast<uint8_t *>(cur); s->ctl.srcSize = block_len; s->ctl.dstDevice = uncompressed + max_uncompressed_size; s->ctl.dstSize = uncompressed_size; } __syncwarp(); if (init_ctl && lane_id == 0) *init_ctl = s->ctl; cur += block_len; max_uncompressed_size += uncompressed_size; } __syncwarp(); if (!lane_id) { s->info.num_compressed_blocks = num_compressed_blocks; s->info.num_uncompressed_blocks = num_uncompressed_blocks; s->info.max_uncompressed_size = max_uncompressed_size; } } __syncthreads(); if (strm_id < num_streams && lane_id == 0) strm_info[strm_id] = s->info; } // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128, 8) gpuPostDecompressionReassemble(CompressedStreamInfo *strm_info, int32_t num_streams) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s *const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (strm_id < num_streams && lane_id == 0) s->info = strm_info[strm_id]; __syncthreads(); if (strm_id < num_streams && s->info.num_compressed_blocks + s->info.num_uncompressed_blocks > 0 && s->info.max_uncompressed_size > 0) { // Walk through the compressed blocks const uint8_t *cur = s->info.compressed_data; const uint8_t *end = cur + s->info.compressed_data_size; const gpu_inflate_input_s *dec_in = s->info.decctl; const gpu_inflate_status_s *dec_out = s->info.decstatus; uint8_t *uncompressed_actual = s->info.uncompressed_data; uint8_t *uncompressed_estimated = uncompressed_actual; uint32_t num_compressed_blocks = 0; uint32_t max_compressed_blocks = s->info.num_compressed_blocks; while (cur + 3 < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); uint32_t is_uncompressed = block_len & 1; uint32_t uncompressed_size_est, uncompressed_size_actual; block_len >>= 1; cur += 3; if (cur + block_len > end) { break; } if (is_uncompressed) { uncompressed_size_est = block_len; uncompressed_size_actual = block_len; } else { if (num_compressed_blocks > max_compressed_blocks) { break; } if (shuffle((lane_id == 0) ? dec_out[num_compressed_blocks].status : 0) != 0) { // Decompression failed, not much point in doing anything else break; } uncompressed_size_est = shuffle((lane_id == 0) ? *(const uint32_t *)&dec_in[num_compressed_blocks].dstSize : 0); uncompressed_size_actual = shuffle( (lane_id == 0) ? *(const uint32_t *)&dec_out[num_compressed_blocks].bytes_written : 0); } // In practice, this should never happen with a well-behaved writer, as we would expect the // uncompressed size to always be equal to the compression block size except for the last // block if (uncompressed_actual < uncompressed_estimated) { // warp-level memmove for (int i = lane_id; i < (int)uncompressed_size_actual; i += 32) { uncompressed_actual[i] = uncompressed_estimated[i]; } } cur += block_len; num_compressed_blocks += 1 - is_uncompressed; uncompressed_estimated += uncompressed_size_est; uncompressed_actual += uncompressed_size_actual; } // Update info with actual uncompressed size if (!lane_id) { size_t total_uncompressed_size = uncompressed_actual - s->info.uncompressed_data; // Set uncompressed size to zero if there were any errors strm_info[strm_id].max_uncompressed_size = (num_compressed_blocks == s->info.num_compressed_blocks) ? 
total_uncompressed_size : 0; } } } /** * @brief Shared mem state for gpuParseRowGroupIndex */ struct rowindex_state_s { ColumnDesc chunk; uint32_t rowgroup_start; uint32_t rowgroup_end; int is_compressed; uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2 CompressedStreamInfo strm_info[2]; RowGroup rowgroups[128]; uint32_t compressed_offset[128][2]; }; enum row_entry_state_e { NOT_FOUND = 0, GET_LENGTH, SKIP_VARINT, SKIP_FIXEDLEN, STORE_INDEX0, STORE_INDEX1, STORE_INDEX2, }; /** * @brief Decode a single row group index entry * * @param[in,out] s row group index state * @param[in] start start position in byte stream * @param[in] end end of byte stream * @return bytes consumed */ static uint32_t __device__ ProtobufParseRowIndexEntry(rowindex_state_s *s, const uint8_t *start, const uint8_t *end) { constexpr uint32_t pb_rowindexentry_id = static_cast<uint32_t>(PB_TYPE_FIXEDLEN) + 8; const uint8_t *cur = start; row_entry_state_e state = NOT_FOUND; uint32_t length = 0, strm_idx_id = s->chunk.skip_count >> 8, idx_id = 1, ci_id = CI_PRESENT, pos_end = 0; while (cur < end) { uint32_t v = 0; for (uint32_t l = 0; l <= 28; l += 7) { uint32_t c = (cur < end) ? *cur++ : 0; v |= (c & 0x7f) << l; if (c <= 0x7f) break; } switch (state) { case NOT_FOUND: if (v == pb_rowindexentry_id) { state = GET_LENGTH; } else { v &= 7; if (v == PB_TYPE_FIXED64) cur += 8; else if (v == PB_TYPE_FIXED32) cur += 4; else if (v == PB_TYPE_VARINT) state = SKIP_VARINT; else if (v == PB_TYPE_FIXEDLEN) state = SKIP_FIXEDLEN; } break; case SKIP_VARINT: state = NOT_FOUND; break; case SKIP_FIXEDLEN: cur += v; state = NOT_FOUND; break; case GET_LENGTH: if (length == 0) { length = (uint32_t)(cur + v - start); state = NOT_FOUND; // Scan for positions (same field id & low-level type as RowIndexEntry // entry) } else { pos_end = min((uint32_t)(cur + v - start), length); state = STORE_INDEX0; } break; case STORE_INDEX0: ci_id = (idx_id == (strm_idx_id & 0xff)) ? CI_DATA : (idx_id == ((strm_idx_id >> 8) & 0xff)) ? CI_DATA2 : CI_PRESENT; idx_id++; if (s->is_compressed) { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = v; if (cur >= start + pos_end) return length; state = STORE_INDEX1; break; } else { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = 0; // Fall through to STORE_INDEX1 for uncompressed (always block0) } case STORE_INDEX1: if (ci_id < CI_PRESENT) s->row_index_entry[1][ci_id] = v; if (cur >= start + pos_end) return length; state = (ci_id == CI_DATA && s->chunk.encoding_kind != DICTIONARY && s->chunk.encoding_kind != DICTIONARY_V2 && (s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY || s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR || s->chunk.type_kind == DECIMAL || s->chunk.type_kind == FLOAT || s->chunk.type_kind == DOUBLE)) ? STORE_INDEX0 : STORE_INDEX2; break; case STORE_INDEX2: if (ci_id < CI_PRESENT) { // Boolean columns have an extra byte to indicate the position of the bit within the byte s->row_index_entry[2][ci_id] = (s->chunk.type_kind == BOOLEAN) ? 
(v << 3) + *cur : v; } if (ci_id == CI_PRESENT || s->chunk.type_kind == BOOLEAN) cur++; if (cur >= start + pos_end) return length; state = STORE_INDEX0; break; } } return (uint32_t)(end - start); } /** * @brief Decode row group index entries * * @param[in,out] s row group index state * @param[in] num_rowgroups Number of index entries to read */ static __device__ void gpuReadRowGroupIndexEntries(rowindex_state_s *s, int num_rowgroups) { const uint8_t *index_data = s->chunk.streams[CI_INDEX]; int index_data_len = s->chunk.strm_len[CI_INDEX]; for (int i = 0; i < num_rowgroups; i++) { s->row_index_entry[0][0] = 0; s->row_index_entry[0][1] = 0; s->row_index_entry[1][0] = 0; s->row_index_entry[1][1] = 0; s->row_index_entry[2][0] = 0; s->row_index_entry[2][1] = 0; if (index_data_len > 0) { int len = ProtobufParseRowIndexEntry(s, index_data, index_data + index_data_len); index_data += len; index_data_len = max(index_data_len - len, 0); for (int j = 0; j < 2; j++) { s->rowgroups[i].strm_offset[j] = s->row_index_entry[1][j]; s->rowgroups[i].run_pos[j] = s->row_index_entry[2][j]; s->compressed_offset[i][j] = s->row_index_entry[0][j]; } } } s->chunk.streams[CI_INDEX] = index_data; s->chunk.strm_len[CI_INDEX] = index_data_len; } /** * @brief Translate block+offset compressed position into an uncompressed offset * * @param[in,out] s row group index state * @param[in] ci_id index to convert (CI_DATA or CI_DATA2) * @param[in] num_rowgroups Number of index entries * @param[in] t thread id */ static __device__ void gpuMapRowIndexToUncompressed(rowindex_state_s *s, int ci_id, int num_rowgroups, int t) { int32_t strm_len = s->chunk.strm_len[ci_id]; if (strm_len > 0) { int32_t compressed_offset = (t < num_rowgroups) ? s->compressed_offset[t][ci_id] : 0; if (compressed_offset > 0) { const uint8_t *start = s->strm_info[ci_id].compressed_data; const uint8_t *cur = start; const uint8_t *end = cur + s->strm_info[ci_id].compressed_data_size; gpu_inflate_status_s *decstatus = s->strm_info[ci_id].decstatus; uint32_t uncomp_offset = 0; for (;;) { uint32_t block_len, is_uncompressed; if (cur + 3 > end || cur + 3 >= start + compressed_offset) { break; } block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16); cur += 3; is_uncompressed = block_len & 1; block_len >>= 1; cur += block_len; if (cur > end) { break; } if (is_uncompressed) { uncomp_offset += block_len; } else { uncomp_offset += decstatus->bytes_written; decstatus++; } } s->rowgroups[t].strm_offset[ci_id] += uncomp_offset; } } } /** * @brief Decode index streams * * @param[out] row_groups RowGroup device array [rowgroup][column] * @param[in] strm_info List of compressed streams (or NULL if uncompressed) * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] num_rowgroups Number of row groups */ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128, 8) gpuParseRowGroupIndex(RowGroup *row_groups, CompressedStreamInfo *strm_info, ColumnDesc *chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride) { __shared__ __align__(16) rowindex_state_s state_g; rowindex_state_s *const s = &state_g; uint32_t chunk_id = blockIdx.y * num_columns + blockIdx.x; int t = threadIdx.x; if (t == 0) { s->chunk = chunks[chunk_id]; if (strm_info) { if (s->chunk.strm_len[0] > 0) s->strm_info[0] = strm_info[s->chunk.strm_id[0]]; if (s->chunk.strm_len[1] > 0) s->strm_info[1] = strm_info[s->chunk.strm_id[1]]; } uint32_t 
rowgroups_in_chunk = (rowidx_stride > 0) ? (s->chunk.num_rows + rowidx_stride - 1) / rowidx_stride : 1; s->rowgroup_start = s->chunk.rowgroup_id; s->rowgroup_end = s->rowgroup_start + rowgroups_in_chunk; s->is_compressed = (strm_info != NULL); } __syncthreads(); while (s->rowgroup_start < s->rowgroup_end) { int num_rowgroups = min(s->rowgroup_end - s->rowgroup_start, 128); int rowgroup_size4, t4, t32; s->rowgroups[t].chunk_id = chunk_id; if (t == 0) { gpuReadRowGroupIndexEntries(s, num_rowgroups); } __syncthreads(); if (s->is_compressed) { // Convert the block + blk_offset pair into a raw offset into the decompressed stream if (s->chunk.strm_len[CI_DATA] > 0) { gpuMapRowIndexToUncompressed(s, CI_DATA, num_rowgroups, t); } if (s->chunk.strm_len[CI_DATA2] > 0) { gpuMapRowIndexToUncompressed(s, CI_DATA2, num_rowgroups, t); } __syncthreads(); } rowgroup_size4 = sizeof(RowGroup) / sizeof(uint32_t); t4 = t & 3; t32 = t >> 2; for (int i = t32; i < num_rowgroups; i += 32) { for (int j = t4; j < rowgroup_size4; j += 4) { ((uint32_t *)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] = ((volatile uint32_t *)&s->rowgroups[i])[j]; } } __syncthreads(); if (t == 0) { s->rowgroup_start += num_rowgroups; } __syncthreads(); } } void __host__ ParseCompressedStripeData(CompressedStreamInfo *strm_info, int32_t num_streams, uint32_t compression_block_size, uint32_t log2maxcr, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block hipLaunchKernelGGL(( gpuParseCompressedStripeData), dim3(dim_grid), dim3(dim_block), 0, stream.value(), strm_info, num_streams, compression_block_size, log2maxcr); } void __host__ PostDecompressionReassemble(CompressedStreamInfo *strm_info, int32_t num_streams, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block hipLaunchKernelGGL(( gpuPostDecompressionReassemble), dim3(dim_grid), dim3(dim_block), 0, stream.value(), strm_info, num_streams); } /** * @brief Launches kernel for constructing rowgroup from index streams * * @param[out] row_groups RowGroup device array [rowgroup][column] * @param[in] strm_info List of compressed streams (or NULL if uncompressed) * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] num_rowgroups Number of row groups * @param[in] stream CUDA stream to use, default 0 */ void __host__ ParseRowGroupIndex(RowGroup *row_groups, CompressedStreamInfo *strm_info, ColumnDesc *chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid(num_columns, num_stripes); // 1 column chunk per block hipLaunchKernelGGL(( gpuParseRowGroupIndex), dim3(dim_grid), dim3(dim_block), 0, stream.value(), row_groups, strm_info, chunks, num_columns, num_stripes, num_rowgroups, rowidx_stride); } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
25ebd53798f4a513daa8ac44e37071026cca5bd5.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_common.h" #include "orc_gpu.h" #include <io/utilities/block_utils.cuh> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { namespace orc { namespace gpu { struct compressed_stream_s { CompressedStreamInfo info; gpu_inflate_input_s ctl; }; // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128, 8) gpuParseCompressedStripeData( CompressedStreamInfo *strm_info, int32_t num_streams, uint32_t block_size, uint32_t log2maxcr) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s *const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (lane_id == 0) { s->info = strm_info[strm_id]; } __syncthreads(); if (strm_id < num_streams) { // Walk through the compressed blocks const uint8_t *cur = s->info.compressed_data; const uint8_t *end = cur + s->info.compressed_data_size; uint8_t *uncompressed = s->info.uncompressed_data; size_t max_uncompressed_size = 0; uint32_t num_compressed_blocks = 0; uint32_t num_uncompressed_blocks = 0; while (cur + 3 < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); uint32_t is_uncompressed = block_len & 1; uint32_t uncompressed_size; gpu_inflate_input_s *init_ctl = nullptr; block_len >>= 1; cur += 3; if (block_len > block_size || cur + block_len > end) { // Fatal num_compressed_blocks = 0; max_uncompressed_size = 0; break; } // TBD: For some codecs like snappy, it wouldn't be too difficult to get the actual // uncompressed size and avoid waste due to block size alignment For now, rely on the max // compression ratio to limit waste for the most extreme cases (small single-block streams) uncompressed_size = (is_uncompressed) ? block_len : (block_len < (block_size >> log2maxcr)) ? block_len << log2maxcr : block_size; if (is_uncompressed) { if (uncompressed_size <= 32) { // For short blocks, copy the uncompressed data to output if (uncompressed && max_uncompressed_size + uncompressed_size <= s->info.max_uncompressed_size && lane_id < uncompressed_size) { uncompressed[max_uncompressed_size + lane_id] = cur[lane_id]; } } else { init_ctl = s->info.copyctl; init_ctl = (init_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks) ? &init_ctl[num_uncompressed_blocks] : nullptr; num_uncompressed_blocks++; } } else { init_ctl = s->info.decctl; init_ctl = (init_ctl && num_compressed_blocks < s->info.num_compressed_blocks) ? 
&init_ctl[num_compressed_blocks] : nullptr; num_compressed_blocks++; } if (!lane_id && init_ctl) { s->ctl.srcDevice = const_cast<uint8_t *>(cur); s->ctl.srcSize = block_len; s->ctl.dstDevice = uncompressed + max_uncompressed_size; s->ctl.dstSize = uncompressed_size; } __syncwarp(); if (init_ctl && lane_id == 0) *init_ctl = s->ctl; cur += block_len; max_uncompressed_size += uncompressed_size; } __syncwarp(); if (!lane_id) { s->info.num_compressed_blocks = num_compressed_blocks; s->info.num_uncompressed_blocks = num_uncompressed_blocks; s->info.max_uncompressed_size = max_uncompressed_size; } } __syncthreads(); if (strm_id < num_streams && lane_id == 0) strm_info[strm_id] = s->info; } // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128, 8) gpuPostDecompressionReassemble(CompressedStreamInfo *strm_info, int32_t num_streams) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s *const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (strm_id < num_streams && lane_id == 0) s->info = strm_info[strm_id]; __syncthreads(); if (strm_id < num_streams && s->info.num_compressed_blocks + s->info.num_uncompressed_blocks > 0 && s->info.max_uncompressed_size > 0) { // Walk through the compressed blocks const uint8_t *cur = s->info.compressed_data; const uint8_t *end = cur + s->info.compressed_data_size; const gpu_inflate_input_s *dec_in = s->info.decctl; const gpu_inflate_status_s *dec_out = s->info.decstatus; uint8_t *uncompressed_actual = s->info.uncompressed_data; uint8_t *uncompressed_estimated = uncompressed_actual; uint32_t num_compressed_blocks = 0; uint32_t max_compressed_blocks = s->info.num_compressed_blocks; while (cur + 3 < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); uint32_t is_uncompressed = block_len & 1; uint32_t uncompressed_size_est, uncompressed_size_actual; block_len >>= 1; cur += 3; if (cur + block_len > end) { break; } if (is_uncompressed) { uncompressed_size_est = block_len; uncompressed_size_actual = block_len; } else { if (num_compressed_blocks > max_compressed_blocks) { break; } if (shuffle((lane_id == 0) ? dec_out[num_compressed_blocks].status : 0) != 0) { // Decompression failed, not much point in doing anything else break; } uncompressed_size_est = shuffle((lane_id == 0) ? *(const uint32_t *)&dec_in[num_compressed_blocks].dstSize : 0); uncompressed_size_actual = shuffle( (lane_id == 0) ? *(const uint32_t *)&dec_out[num_compressed_blocks].bytes_written : 0); } // In practice, this should never happen with a well-behaved writer, as we would expect the // uncompressed size to always be equal to the compression block size except for the last // block if (uncompressed_actual < uncompressed_estimated) { // warp-level memmove for (int i = lane_id; i < (int)uncompressed_size_actual; i += 32) { uncompressed_actual[i] = uncompressed_estimated[i]; } } cur += block_len; num_compressed_blocks += 1 - is_uncompressed; uncompressed_estimated += uncompressed_size_est; uncompressed_actual += uncompressed_size_actual; } // Update info with actual uncompressed size if (!lane_id) { size_t total_uncompressed_size = uncompressed_actual - s->info.uncompressed_data; // Set uncompressed size to zero if there were any errors strm_info[strm_id].max_uncompressed_size = (num_compressed_blocks == s->info.num_compressed_blocks) ? 
total_uncompressed_size : 0; } } } /** * @brief Shared mem state for gpuParseRowGroupIndex */ struct rowindex_state_s { ColumnDesc chunk; uint32_t rowgroup_start; uint32_t rowgroup_end; int is_compressed; uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2 CompressedStreamInfo strm_info[2]; RowGroup rowgroups[128]; uint32_t compressed_offset[128][2]; }; enum row_entry_state_e { NOT_FOUND = 0, GET_LENGTH, SKIP_VARINT, SKIP_FIXEDLEN, STORE_INDEX0, STORE_INDEX1, STORE_INDEX2, }; /** * @brief Decode a single row group index entry * * @param[in,out] s row group index state * @param[in] start start position in byte stream * @param[in] end end of byte stream * @return bytes consumed */ static uint32_t __device__ ProtobufParseRowIndexEntry(rowindex_state_s *s, const uint8_t *start, const uint8_t *end) { constexpr uint32_t pb_rowindexentry_id = static_cast<uint32_t>(PB_TYPE_FIXEDLEN) + 8; const uint8_t *cur = start; row_entry_state_e state = NOT_FOUND; uint32_t length = 0, strm_idx_id = s->chunk.skip_count >> 8, idx_id = 1, ci_id = CI_PRESENT, pos_end = 0; while (cur < end) { uint32_t v = 0; for (uint32_t l = 0; l <= 28; l += 7) { uint32_t c = (cur < end) ? *cur++ : 0; v |= (c & 0x7f) << l; if (c <= 0x7f) break; } switch (state) { case NOT_FOUND: if (v == pb_rowindexentry_id) { state = GET_LENGTH; } else { v &= 7; if (v == PB_TYPE_FIXED64) cur += 8; else if (v == PB_TYPE_FIXED32) cur += 4; else if (v == PB_TYPE_VARINT) state = SKIP_VARINT; else if (v == PB_TYPE_FIXEDLEN) state = SKIP_FIXEDLEN; } break; case SKIP_VARINT: state = NOT_FOUND; break; case SKIP_FIXEDLEN: cur += v; state = NOT_FOUND; break; case GET_LENGTH: if (length == 0) { length = (uint32_t)(cur + v - start); state = NOT_FOUND; // Scan for positions (same field id & low-level type as RowIndexEntry // entry) } else { pos_end = min((uint32_t)(cur + v - start), length); state = STORE_INDEX0; } break; case STORE_INDEX0: ci_id = (idx_id == (strm_idx_id & 0xff)) ? CI_DATA : (idx_id == ((strm_idx_id >> 8) & 0xff)) ? CI_DATA2 : CI_PRESENT; idx_id++; if (s->is_compressed) { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = v; if (cur >= start + pos_end) return length; state = STORE_INDEX1; break; } else { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = 0; // Fall through to STORE_INDEX1 for uncompressed (always block0) } case STORE_INDEX1: if (ci_id < CI_PRESENT) s->row_index_entry[1][ci_id] = v; if (cur >= start + pos_end) return length; state = (ci_id == CI_DATA && s->chunk.encoding_kind != DICTIONARY && s->chunk.encoding_kind != DICTIONARY_V2 && (s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY || s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR || s->chunk.type_kind == DECIMAL || s->chunk.type_kind == FLOAT || s->chunk.type_kind == DOUBLE)) ? STORE_INDEX0 : STORE_INDEX2; break; case STORE_INDEX2: if (ci_id < CI_PRESENT) { // Boolean columns have an extra byte to indicate the position of the bit within the byte s->row_index_entry[2][ci_id] = (s->chunk.type_kind == BOOLEAN) ? 
(v << 3) + *cur : v; } if (ci_id == CI_PRESENT || s->chunk.type_kind == BOOLEAN) cur++; if (cur >= start + pos_end) return length; state = STORE_INDEX0; break; } } return (uint32_t)(end - start); } /** * @brief Decode row group index entries * * @param[in,out] s row group index state * @param[in] num_rowgroups Number of index entries to read */ static __device__ void gpuReadRowGroupIndexEntries(rowindex_state_s *s, int num_rowgroups) { const uint8_t *index_data = s->chunk.streams[CI_INDEX]; int index_data_len = s->chunk.strm_len[CI_INDEX]; for (int i = 0; i < num_rowgroups; i++) { s->row_index_entry[0][0] = 0; s->row_index_entry[0][1] = 0; s->row_index_entry[1][0] = 0; s->row_index_entry[1][1] = 0; s->row_index_entry[2][0] = 0; s->row_index_entry[2][1] = 0; if (index_data_len > 0) { int len = ProtobufParseRowIndexEntry(s, index_data, index_data + index_data_len); index_data += len; index_data_len = max(index_data_len - len, 0); for (int j = 0; j < 2; j++) { s->rowgroups[i].strm_offset[j] = s->row_index_entry[1][j]; s->rowgroups[i].run_pos[j] = s->row_index_entry[2][j]; s->compressed_offset[i][j] = s->row_index_entry[0][j]; } } } s->chunk.streams[CI_INDEX] = index_data; s->chunk.strm_len[CI_INDEX] = index_data_len; } /** * @brief Translate block+offset compressed position into an uncompressed offset * * @param[in,out] s row group index state * @param[in] ci_id index to convert (CI_DATA or CI_DATA2) * @param[in] num_rowgroups Number of index entries * @param[in] t thread id */ static __device__ void gpuMapRowIndexToUncompressed(rowindex_state_s *s, int ci_id, int num_rowgroups, int t) { int32_t strm_len = s->chunk.strm_len[ci_id]; if (strm_len > 0) { int32_t compressed_offset = (t < num_rowgroups) ? s->compressed_offset[t][ci_id] : 0; if (compressed_offset > 0) { const uint8_t *start = s->strm_info[ci_id].compressed_data; const uint8_t *cur = start; const uint8_t *end = cur + s->strm_info[ci_id].compressed_data_size; gpu_inflate_status_s *decstatus = s->strm_info[ci_id].decstatus; uint32_t uncomp_offset = 0; for (;;) { uint32_t block_len, is_uncompressed; if (cur + 3 > end || cur + 3 >= start + compressed_offset) { break; } block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16); cur += 3; is_uncompressed = block_len & 1; block_len >>= 1; cur += block_len; if (cur > end) { break; } if (is_uncompressed) { uncomp_offset += block_len; } else { uncomp_offset += decstatus->bytes_written; decstatus++; } } s->rowgroups[t].strm_offset[ci_id] += uncomp_offset; } } } /** * @brief Decode index streams * * @param[out] row_groups RowGroup device array [rowgroup][column] * @param[in] strm_info List of compressed streams (or NULL if uncompressed) * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] num_rowgroups Number of row groups */ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128, 8) gpuParseRowGroupIndex(RowGroup *row_groups, CompressedStreamInfo *strm_info, ColumnDesc *chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride) { __shared__ __align__(16) rowindex_state_s state_g; rowindex_state_s *const s = &state_g; uint32_t chunk_id = blockIdx.y * num_columns + blockIdx.x; int t = threadIdx.x; if (t == 0) { s->chunk = chunks[chunk_id]; if (strm_info) { if (s->chunk.strm_len[0] > 0) s->strm_info[0] = strm_info[s->chunk.strm_id[0]]; if (s->chunk.strm_len[1] > 0) s->strm_info[1] = strm_info[s->chunk.strm_id[1]]; } uint32_t 
rowgroups_in_chunk =
      (rowidx_stride > 0) ? (s->chunk.num_rows + rowidx_stride - 1) / rowidx_stride : 1;
    s->rowgroup_start = s->chunk.rowgroup_id;
    s->rowgroup_end   = s->rowgroup_start + rowgroups_in_chunk;
    s->is_compressed  = (strm_info != NULL);
  }
  __syncthreads();
  while (s->rowgroup_start < s->rowgroup_end) {
    int num_rowgroups = min(s->rowgroup_end - s->rowgroup_start, 128);
    int rowgroup_size4, t4, t32;
    s->rowgroups[t].chunk_id = chunk_id;
    if (t == 0) { gpuReadRowGroupIndexEntries(s, num_rowgroups); }
    __syncthreads();
    if (s->is_compressed) {
      // Convert the block + blk_offset pair into a raw offset into the decompressed stream
      if (s->chunk.strm_len[CI_DATA] > 0) {
        gpuMapRowIndexToUncompressed(s, CI_DATA, num_rowgroups, t);
      }
      if (s->chunk.strm_len[CI_DATA2] > 0) {
        gpuMapRowIndexToUncompressed(s, CI_DATA2, num_rowgroups, t);
      }
      __syncthreads();
    }
    rowgroup_size4 = sizeof(RowGroup) / sizeof(uint32_t);
    t4             = t & 3;
    t32            = t >> 2;
    for (int i = t32; i < num_rowgroups; i += 32) {
      for (int j = t4; j < rowgroup_size4; j += 4) {
        ((uint32_t *)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] =
          ((volatile uint32_t *)&s->rowgroups[i])[j];
      }
    }
    __syncthreads();
    if (t == 0) { s->rowgroup_start += num_rowgroups; }
    __syncthreads();
  }
}

void __host__ ParseCompressedStripeData(CompressedStreamInfo *strm_info,
                                        int32_t num_streams,
                                        uint32_t compression_block_size,
                                        uint32_t log2maxcr,
                                        rmm::cuda_stream_view stream)
{
  dim3 dim_block(128, 1);
  dim3 dim_grid((num_streams + 3) >> 2, 1);  // 1 stream per warp, 4 warps per block
  gpuParseCompressedStripeData<<<dim_grid, dim_block, 0, stream.value()>>>(
    strm_info, num_streams, compression_block_size, log2maxcr);
}

void __host__ PostDecompressionReassemble(CompressedStreamInfo *strm_info,
                                          int32_t num_streams,
                                          rmm::cuda_stream_view stream)
{
  dim3 dim_block(128, 1);
  dim3 dim_grid((num_streams + 3) >> 2, 1);  // 1 stream per warp, 4 warps per block
  gpuPostDecompressionReassemble<<<dim_grid, dim_block, 0, stream.value()>>>(strm_info,
                                                                             num_streams);
}

/**
 * @brief Launches kernel for constructing rowgroup from index streams
 *
 * @param[out] row_groups RowGroup device array [rowgroup][column]
 * @param[in] strm_info List of compressed streams (or NULL if uncompressed)
 * @param[in] chunks ColumnDesc device array [stripe][column]
 * @param[in] num_columns Number of columns
 * @param[in] num_stripes Number of stripes
 * @param[in] num_rowgroups Number of row groups
 * @param[in] stream CUDA stream to use, default 0
 */
void __host__ ParseRowGroupIndex(RowGroup *row_groups,
                                 CompressedStreamInfo *strm_info,
                                 ColumnDesc *chunks,
                                 uint32_t num_columns,
                                 uint32_t num_stripes,
                                 uint32_t num_rowgroups,
                                 uint32_t rowidx_stride,
                                 rmm::cuda_stream_view stream)
{
  dim3 dim_block(128, 1);
  dim3 dim_grid(num_columns, num_stripes);  // 1 column chunk per block
  gpuParseRowGroupIndex<<<dim_grid, dim_block, 0, stream.value()>>>(
    row_groups, strm_info, chunks, num_columns, num_stripes, num_rowgroups, rowidx_stride);
}

}  // namespace gpu
}  // namespace orc
}  // namespace io
}  // namespace cudf
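// --- Illustrative sketch (not part of the original file) ---------------------------
// gpuParseCompressedStripeData, gpuPostDecompressionReassemble and
// gpuMapRowIndexToUncompressed all decode the same 3-byte ORC compression block
// header: a little-endian value whose bit 0 is the "stored uncompressed" flag and
// whose remaining bits are the block length. The standalone host-side helper below
// (hypothetical names orc_block_header / parse_orc_block_header) only documents that
// bit layout; it is not used by the kernels above.
#include <cstdint>

struct orc_block_header {
  uint32_t length;       // compressed (or stored) length in bytes
  bool is_uncompressed;  // true if the block was stored without compression
};

inline orc_block_header parse_orc_block_header(const uint8_t *cur)
{
  // same decode as in the kernels: cur[0] | (cur[1] << 8) | (cur[2] << 16)
  uint32_t raw = cur[0] | (cur[1] << 8) | (cur[2] << 16);
  return orc_block_header{raw >> 1, (raw & 1) != 0};
}
// -----------------------------------------------------------------------------------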
a3c096dd2350318184d3ae6c5598078b41322398.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/

/**
 * Computes the backward pass of the ReLU activation: the incoming gradient is
 * propagated only where the corresponding forward input was positive.
 * @param X the input matrix of the forward pass, allocated on the GPU
 * @param dout the incoming gradient, allocated on the GPU
 * @param ret the output gradient array allocated on the GPU
 * @param rlen the number of rows of the matrices
 * @param clen the number of columns of the matrices
 */
extern "C"
__global__ void relu_backward(double* X, double* dout, double* ret, int rlen, int clen) {
  int ix = blockIdx.x * blockDim.x + threadIdx.x;
  int iy = blockIdx.y * blockDim.y + threadIdx.y;
  if (ix < rlen && iy < clen) {
    int index = ix * clen + iy;
    ret[index] = X[index] > 0 ? dout[index] : 0;
  }
}
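// --- Illustrative sketch (not part of the original file) ---------------------------
// relu_backward maps threadIdx/blockIdx to (row ix, column iy) and writes
// ret[ix * clen + iy]. A HIP host-side launcher for it could look like the following;
// the 16x16 block size and the device pointers d_X, d_dout, d_ret are assumptions,
// not taken from the original project.
#include <hip/hip_runtime.h>

inline void launch_relu_backward(double* d_X, double* d_dout, double* d_ret,
                                 int rlen, int clen, hipStream_t stream)
{
  dim3 block(16, 16);
  dim3 grid((rlen + block.x - 1) / block.x,   // x dimension covers rows  (ix < rlen)
            (clen + block.y - 1) / block.y);  // y dimension covers cols  (iy < clen)
  hipLaunchKernelGGL(relu_backward, grid, block, 0, stream,
                     d_X, d_dout, d_ret, rlen, clen);
}
// -----------------------------------------------------------------------------------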
a3c096dd2350318184d3ae6c5598078b41322398.cu
#include "includes.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /********************************** When updating a kernel or adding a new one, please compile the ptx file and commit it: nvcc -ptx -arch=sm_30 SystemML.cu ***********************************/ /** * Does a copy of upper to lower triangle of the given matrix * @param ret the input and output array allocated on the GPU * @param dim the number of rows of the square matrix ret * @param N total number of elements of the matrix */ extern "C" extern "C" __global__ void relu_backward(double* X, double* dout, double* ret, int rlen, int clen) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; if(ix < rlen && iy < clen) { int index = ix * clen + iy; ret[index] = X[index] > 0 ? dout[index] : 0; } }
0c5981e9672d7a8628d626340cab3a05e8eea7b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/lists/lists_column_view.hpp> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/table_view.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/default_stream.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/iterator_categories.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/pair.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/transform.h> #include <thrust/tuple.h> #include <cstddef> #include <numeric> namespace cudf { namespace { // align all column size allocations to this boundary so that all output column buffers // start at that alignment. static constexpr std::size_t split_align = 64; /** * @brief Struct which contains information on a source buffer. * * The definition of "buffer" used throughout this module is a component piece of a * cudf column. So for example, a fixed-width column with validity would have 2 associated * buffers : the data itself and the validity buffer. contiguous_split operates by breaking * each column up into it's individual components and copying each one as a separate kernel * block. */ struct src_buf_info { src_buf_info(cudf::type_id _type, const int* _offsets, int _offset_stack_pos, int _parent_offsets_index, bool _is_validity, size_type _column_offset) : type(_type), offsets(_offsets), offset_stack_pos(_offset_stack_pos), parent_offsets_index(_parent_offsets_index), is_validity(_is_validity), column_offset(_column_offset) { } cudf::type_id type; const int* offsets; // a pointer to device memory offsets if I am an offset buffer int offset_stack_pos; // position in the offset stack buffer int parent_offsets_index; // immediate parent that has offsets, or -1 if none bool is_validity; // if I am a validity buffer size_type column_offset; // offset in the case of a sliced column }; /** * @brief Struct which contains information on a destination buffer. * * Similar to src_buf_info, dst_buf_info contains information on a destination buffer we * are going to copy to. If we have N input buffers (which come from X columns), and * M partitions, then we have N*M destination buffers. 
*/ struct dst_buf_info { // constant across all copy commands for this buffer std::size_t buf_size; // total size of buffer, including padding int num_elements; // # of elements to be copied int element_size; // size of each element in bytes int num_rows; // # of rows to be copied(which may be different from num_elements in the case of // validity or offset buffers) int src_element_index; // element index to start reading from from my associated source buffer std::size_t dst_offset; // my offset into the per-partition allocation int value_shift; // amount to shift values down by (for offset buffers) int bit_shift; // # of bits to shift right by (for validity buffers) size_type valid_count; // validity count for this block of work int src_buf_index; // source buffer index int dst_buf_index; // destination buffer index }; /** * @brief Copy a single buffer of column data, shifting values (for offset columns), * and validity (for validity buffers) as necessary. * * Copies a single partition of a source column buffer to a destination buffer. Shifts * element values by value_shift in the case of a buffer of offsets (value_shift will * only ever be > 0 in that case). Shifts elements bitwise by bit_shift in the case of * a validity buffer (bif_shift will only ever be > 0 in that case). This function assumes * value_shift and bit_shift will never be > 0 at the same time. * * This function expects: * - src may be a misaligned address * - dst must be an aligned address * * This function always does the ALU work related to value_shift and bit_shift because it is * entirely memory-bandwidth bound. * * @param dst Destination buffer * @param src Source buffer * @param t Thread index * @param num_elements Number of elements to copy * @param element_size Size of each element in bytes * @param src_element_index Element index to start copying at * @param stride Size of the kernel block * @param value_shift Shift incoming 4-byte offset values down by this amount * @param bit_shift Shift incoming data right by this many bits * @param num_rows Number of rows being copied * @param valid_count Optional pointer to a value to store count of set bits */ template <int block_size> __device__ void copy_buffer(uint8_t* __restrict__ dst, uint8_t const* __restrict__ src, int t, std::size_t num_elements, std::size_t element_size, std::size_t src_element_index, uint32_t stride, int value_shift, int bit_shift, std::size_t num_rows, size_type* valid_count) { src += (src_element_index * element_size); size_type thread_valid_count = 0; // handle misalignment. read 16 bytes in 4 byte reads. write in a single 16 byte store. std::size_t const num_bytes = num_elements * element_size; // how many bytes we're misaligned from 4-byte alignment uint32_t const ofs = reinterpret_cast<uintptr_t>(src) % 4; std::size_t pos = t * 16; stride *= 16; while (pos + 20 <= num_bytes) { // read from the nearest aligned address. 
const uint32_t* in32 = reinterpret_cast<const uint32_t*>((src + pos) - ofs); uint4 v = uint4{in32[0], in32[1], in32[2], in32[3]}; if (ofs || bit_shift) { v.x = __funnelshift_r(v.x, v.y, ofs * 8 + bit_shift); v.y = __funnelshift_r(v.y, v.z, ofs * 8 + bit_shift); v.z = __funnelshift_r(v.z, v.w, ofs * 8 + bit_shift); v.w = __funnelshift_r(v.w, in32[4], ofs * 8 + bit_shift); } v.x -= value_shift; v.y -= value_shift; v.z -= value_shift; v.w -= value_shift; reinterpret_cast<uint4*>(dst)[pos / 16] = v; if (valid_count) { thread_valid_count += (__popc(v.x) + __popc(v.y) + __popc(v.z) + __popc(v.w)); } pos += stride; } // copy trailing bytes if (t == 0) { std::size_t remainder; if (num_bytes < 16) { remainder = num_bytes; } else { std::size_t const last_bracket = (num_bytes / 16) * 16; remainder = num_bytes - last_bracket; if (remainder < 4) { // we had less than 20 bytes for the last possible 16 byte copy, so copy 16 + the extra remainder += 16; } } // if we're performing a value shift (offsets), or a bit shift (validity) the # of bytes and // alignment must be a multiple of 4. value shifting and bit shifting are mutually exclusive // and will never both be true at the same time. if (value_shift || bit_shift) { std::size_t idx = (num_bytes - remainder) / 4; uint32_t v = remainder > 0 ? (reinterpret_cast<uint32_t const*>(src)[idx] - value_shift) : 0; constexpr size_type rows_per_element = 32; auto const have_trailing_bits = ((num_elements * rows_per_element) - num_rows) < bit_shift; while (remainder) { // if we're at the very last word of a validity copy, we do not always need to read the next // word to get the final trailing bits. auto const read_trailing_bits = bit_shift > 0 && remainder == 4 && have_trailing_bits; uint32_t const next = (read_trailing_bits || remainder > 4) ? (reinterpret_cast<uint32_t const*>(src)[idx + 1] - value_shift) : 0; uint32_t const val = (v >> bit_shift) | (next << (32 - bit_shift)); if (valid_count) { thread_valid_count += __popc(val); } reinterpret_cast<uint32_t*>(dst)[idx] = val; v = next; idx++; remainder -= 4; } } else { while (remainder) { std::size_t const idx = num_bytes - remainder--; uint32_t const val = reinterpret_cast<uint8_t const*>(src)[idx]; if (valid_count) { thread_valid_count += __popc(val); } reinterpret_cast<uint8_t*>(dst)[idx] = val; } } } if (valid_count) { if (num_bytes == 0) { if (!t) { *valid_count = 0; } } else { using BlockReduce = hipcub::BlockReduce<size_type, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; size_type block_valid_count{BlockReduce(temp_storage).Sum(thread_valid_count)}; if (!t) { // we may have copied more bits than there are actual rows in the output. // so we need to subtract off the count of any bits that shouldn't have been // considered during the copy step. std::size_t const max_row = (num_bytes * 8); std::size_t const slack_bits = max_row > num_rows ? max_row - num_rows : 0; auto const slack_mask = set_most_significant_bits(slack_bits); if (slack_mask > 0) { uint32_t const last_word = reinterpret_cast<uint32_t*>(dst + (num_bytes - 4))[0]; block_valid_count -= __popc(last_word & slack_mask); } *valid_count = block_valid_count; } } } } /** * @brief Kernel which copies data from multiple source buffers to multiple * destination buffers. * * When doing a contiguous_split on X columns comprising N total internal buffers * with M splits, we end up having to copy N*M source/destination buffer pairs. 
* These logical copies are further subdivided to distribute the amount of work * to be done as evenly as possible across the multiprocessors on the device. * This kernel is arranged such that each block copies 1 source/destination pair. * * @param src_bufs Input source buffers * @param dst_bufs Destination buffers * @param buf_info Information on the range of values to be copied for each destination buffer. */ template <int block_size> __global__ void copy_partitions(uint8_t const** src_bufs, uint8_t** dst_bufs, dst_buf_info* buf_info) { auto const buf_index = blockIdx.x; auto const src_buf_index = buf_info[buf_index].src_buf_index; auto const dst_buf_index = buf_info[buf_index].dst_buf_index; // copy, shifting offsets and validity bits as needed copy_buffer<block_size>( dst_bufs[dst_buf_index] + buf_info[buf_index].dst_offset, src_bufs[src_buf_index], threadIdx.x, buf_info[buf_index].num_elements, buf_info[buf_index].element_size, buf_info[buf_index].src_element_index, blockDim.x, buf_info[buf_index].value_shift, buf_info[buf_index].bit_shift, buf_info[buf_index].num_rows, buf_info[buf_index].valid_count > 0 ? &buf_info[buf_index].valid_count : nullptr); } // The block of functions below are all related: // // compute_offset_stack_size() // setup_src_buf_data() // count_src_bufs() // setup_source_buf_info() // build_output_columns() // // Critically, they all traverse the hierarchy of source columns and their children // in a specific order to guarantee they produce various outputs in a consistent // way. For example, setup_src_buf_info() produces a series of information // structs that must appear in the same order that setup_src_buf_data() produces // buffers. // // So please be careful if you change the way in which these functions and // functors traverse the hierarchy. /** * @brief Returns whether or not the specified type is a column that contains offsets. */ bool is_offset_type(type_id id) { return (id == type_id::STRING or id == type_id::LIST); } /** * @brief Compute total device memory stack size needed to process nested * offsets per-output buffer. * * When determining the range of rows to be copied for each output buffer * we have to recursively apply the stack of offsets from our parent columns * (lists or strings). We want to do this computation on the gpu because offsets * are stored in device memory. However we don't want to do recursion on the gpu, so * each destination buffer gets a "stack" of space to work with equal in size to * it's offset nesting depth. This function computes the total size of all of those * stacks. * * This function is called recursively in the case of nested types. * * @param begin Beginning of input columns * @param end End of input columns * @param offset_depth Current offset nesting depth * * @returns Total offset stack size needed for this range of columns. */ template <typename InputIter> std::size_t compute_offset_stack_size(InputIter begin, InputIter end, int offset_depth = 0) { return std::accumulate(begin, end, 0, [offset_depth](auto stack_size, column_view const& col) { auto const num_buffers = 1 + (col.nullable() ? 1 : 0); return stack_size + (offset_depth * num_buffers) + compute_offset_stack_size( col.child_begin(), col.child_end(), offset_depth + is_offset_type(col.type().id())); }); } /** * @brief Retrieve all buffers for a range of source columns. * * Retrieve the individual buffers that make up a range of input columns. * * This function is called recursively in the case of nested types. 
* * @param begin Beginning of input columns * @param end End of input columns * @param out_buf Iterator into output buffer infos * * @returns next output buffer iterator */ template <typename InputIter, typename OutputIter> OutputIter setup_src_buf_data(InputIter begin, InputIter end, OutputIter out_buf) { std::for_each(begin, end, [&out_buf](column_view const& col) { if (col.nullable()) { *out_buf = reinterpret_cast<uint8_t const*>(col.null_mask()); out_buf++; } // NOTE: we're always returning the base pointer here. column-level offset is accounted // for later. Also, for some column types (string, list, struct) this pointer will be null // because there is no associated data with the root column. *out_buf = col.head<uint8_t>(); out_buf++; out_buf = setup_src_buf_data(col.child_begin(), col.child_end(), out_buf); }); return out_buf; } /** * @brief Count the total number of source buffers we will be copying * from. * * This count includes buffers for all input columns. For example a * fixed-width column with validity would be 2 buffers (data, validity). * A string column with validity would be 3 buffers (chars, offsets, validity). * * This function is called recursively in the case of nested types. * * @param begin Beginning of input columns * @param end End of input columns * * @returns total number of source buffers for this range of columns */ template <typename InputIter> size_type count_src_bufs(InputIter begin, InputIter end) { auto buf_iter = thrust::make_transform_iterator(begin, [](column_view const& col) { return 1 + (col.nullable() ? 1 : 0) + count_src_bufs(col.child_begin(), col.child_end()); }); return std::accumulate(buf_iter, buf_iter + std::distance(begin, end), 0); } /** * @brief Computes source buffer information for the copy kernel. * * For each input column to be split we need to know several pieces of information * in the copy kernel. This function traverses the input columns and prepares this * information for the gpu. * * This function is called recursively in the case of nested types. * * @param begin Beginning of input columns * @param end End of input columns * @param head Beginning of source buffer info array * @param current Current source buffer info to be written to * @param offset_stack_pos Integer representing our current offset nesting depth * (how many list or string levels deep we are) * @param parent_offset_index Index into src_buf_info output array indicating our nearest * containing list parent. -1 if we have no list parent * @param offset_depth Current offset nesting depth (how many list levels deep we are) * * @returns next src_buf_output after processing this range of input columns */ // setup source buf info template <typename InputIter> std::pair<src_buf_info*, size_type> setup_source_buf_info(InputIter begin, InputIter end, src_buf_info* head, src_buf_info* current, int offset_stack_pos = 0, int parent_offset_index = -1, int offset_depth = 0); /** * @brief Functor that builds source buffer information based on input columns. * * Called by setup_source_buf_info to build information for a single source column. This function * will recursively call setup_source_buf_info in the case of nested types. 
*/ struct buf_info_functor { src_buf_info* head; template <typename T> std::pair<src_buf_info*, size_type> operator()(column_view const& col, src_buf_info* current, int offset_stack_pos, int parent_offset_index, int offset_depth) { if (col.nullable()) { std::tie(current, offset_stack_pos) = add_null_buffer(col, current, offset_stack_pos, parent_offset_index, offset_depth); } // info for the data buffer *current = src_buf_info( col.type().id(), nullptr, offset_stack_pos, parent_offset_index, false, col.offset()); return {current + 1, offset_stack_pos + offset_depth}; } template <typename T, typename... Args> std::enable_if_t<std::is_same_v<T, cudf::dictionary32>, std::pair<src_buf_info*, size_type>> operator()(Args&&...) { CUDF_FAIL("Unsupported type"); } private: std::pair<src_buf_info*, size_type> add_null_buffer(column_view const& col, src_buf_info* current, int offset_stack_pos, int parent_offset_index, int offset_depth) { // info for the validity buffer *current = src_buf_info( type_id::INT32, nullptr, offset_stack_pos, parent_offset_index, true, col.offset()); return {current + 1, offset_stack_pos + offset_depth}; } }; template <> std::pair<src_buf_info*, size_type> buf_info_functor::operator()<cudf::string_view>( column_view const& col, src_buf_info* current, int offset_stack_pos, int parent_offset_index, int offset_depth) { if (col.nullable()) { std::tie(current, offset_stack_pos) = add_null_buffer(col, current, offset_stack_pos, parent_offset_index, offset_depth); } // string columns hold no actual data, but we need to keep a record // of it so we know it's size when we are constructing the output columns *current = src_buf_info( type_id::STRING, nullptr, offset_stack_pos, parent_offset_index, false, col.offset()); current++; offset_stack_pos += offset_depth; // string columns don't necessarily have children if (col.num_children() > 0) { CUDF_EXPECTS(col.num_children() == 2, "Encountered malformed string column"); strings_column_view scv(col); // info for the offsets buffer auto offset_col = current; CUDF_EXPECTS(not scv.offsets().nullable(), "Encountered nullable string offsets column"); *current = src_buf_info(type_id::INT32, // note: offsets can be null in the case where the string column // has been created with empty_like(). scv.offsets().begin<cudf::id_to_type<type_id::INT32>>(), offset_stack_pos, parent_offset_index, false, col.offset()); current++; offset_stack_pos += offset_depth; // since we are crossing an offset boundary, calculate our new depth and parent offset index. 
offset_depth++; parent_offset_index = offset_col - head; // prevent appending buf_info for non-existent chars buffer CUDF_EXPECTS(not scv.chars().nullable(), "Encountered nullable string chars column"); // info for the chars buffer *current = src_buf_info( type_id::INT8, nullptr, offset_stack_pos, parent_offset_index, false, col.offset()); current++; offset_stack_pos += offset_depth; } return {current, offset_stack_pos}; } template <> std::pair<src_buf_info*, size_type> buf_info_functor::operator()<cudf::list_view>( column_view const& col, src_buf_info* current, int offset_stack_pos, int parent_offset_index, int offset_depth) { lists_column_view lcv(col); if (col.nullable()) { std::tie(current, offset_stack_pos) = add_null_buffer(col, current, offset_stack_pos, parent_offset_index, offset_depth); } // list columns hold no actual data, but we need to keep a record // of it so we know it's size when we are constructing the output columns *current = src_buf_info( type_id::LIST, nullptr, offset_stack_pos, parent_offset_index, false, col.offset()); current++; offset_stack_pos += offset_depth; CUDF_EXPECTS(col.num_children() == 2, "Encountered malformed list column"); // info for the offsets buffer auto offset_col = current; *current = src_buf_info(type_id::INT32, // note: offsets can be null in the case where the lists column // has been created with empty_like(). lcv.offsets().begin<cudf::id_to_type<type_id::INT32>>(), offset_stack_pos, parent_offset_index, false, col.offset()); current++; offset_stack_pos += offset_depth; // since we are crossing an offset boundary, calculate our new depth and parent offset index. offset_depth++; parent_offset_index = offset_col - head; return setup_source_buf_info(col.child_begin() + 1, col.child_end(), head, current, offset_stack_pos, parent_offset_index, offset_depth); } template <> std::pair<src_buf_info*, size_type> buf_info_functor::operator()<cudf::struct_view>( column_view const& col, src_buf_info* current, int offset_stack_pos, int parent_offset_index, int offset_depth) { if (col.nullable()) { std::tie(current, offset_stack_pos) = add_null_buffer(col, current, offset_stack_pos, parent_offset_index, offset_depth); } // struct columns hold no actual data, but we need to keep a record // of it so we know it's size when we are constructing the output columns *current = src_buf_info( type_id::STRUCT, nullptr, offset_stack_pos, parent_offset_index, false, col.offset()); current++; offset_stack_pos += offset_depth; // recurse on children cudf::structs_column_view scv(col); std::vector<column_view> sliced_children; sliced_children.reserve(scv.num_children()); std::transform(thrust::make_counting_iterator(0), thrust::make_counting_iterator(scv.num_children()), std::back_inserter(sliced_children), [&scv](size_type child_index) { return scv.get_sliced_child(child_index); }); return setup_source_buf_info(sliced_children.begin(), sliced_children.end(), head, current, offset_stack_pos, parent_offset_index, offset_depth); } template <typename InputIter> std::pair<src_buf_info*, size_type> setup_source_buf_info(InputIter begin, InputIter end, src_buf_info* head, src_buf_info* current, int offset_stack_pos, int parent_offset_index, int offset_depth) { std::for_each(begin, end, [&](column_view const& col) { std::tie(current, offset_stack_pos) = cudf::type_dispatcher(col.type(), buf_info_functor{head}, col, current, offset_stack_pos, parent_offset_index, offset_depth); }); return {current, offset_stack_pos}; } /** * @brief Given a set of input columns and processed 
split buffers, produce * output columns. * * After performing the split we are left with 1 large buffer per incoming split * partition. We need to traverse this buffer and distribute the individual * subpieces that represent individual columns and children to produce the final * output columns. * * This function is called recursively in the case of nested types. * * @param begin Beginning of input columns * @param end End of input columns * @param info_begin Iterator of dst_buf_info structs containing information about each * copied buffer * @param out_begin Output iterator of column views * @param base_ptr Pointer to the base address of copied data for the working partition * * @returns new dst_buf_info iterator after processing this range of input columns */ template <typename InputIter, typename BufInfo, typename Output> BufInfo build_output_columns(InputIter begin, InputIter end, BufInfo info_begin, Output out_begin, uint8_t const* const base_ptr) { auto current_info = info_begin; std::transform(begin, end, out_begin, [&current_info, base_ptr](column_view const& src) { auto [bitmask_ptr, null_count] = [&]() { if (src.nullable()) { auto const ptr = current_info->num_elements == 0 ? nullptr : reinterpret_cast<bitmask_type const*>(base_ptr + current_info->dst_offset); auto const null_count = current_info->num_elements == 0 ? 0 : (current_info->num_rows - current_info->valid_count); ++current_info; return std::pair(ptr, null_count); } return std::pair(static_cast<bitmask_type const*>(nullptr), 0); }(); // size/data pointer for the column auto const size = current_info->num_elements; uint8_t const* data_ptr = size == 0 || src.head() == nullptr ? nullptr : base_ptr + current_info->dst_offset; ++current_info; // children auto children = std::vector<column_view>{}; children.reserve(src.num_children()); current_info = build_output_columns( src.child_begin(), src.child_end(), current_info, std::back_inserter(children), base_ptr); return column_view{src.type(), size, data_ptr, bitmask_ptr, null_count, 0, std::move(children)}; }); return current_info; } /** * @brief Functor that retrieves the size of a destination buffer */ struct buf_size_functor { dst_buf_info const* ci; std::size_t operator() __device__(int index) { return ci[index].buf_size; } }; /** * @brief Functor that retrieves the split "key" for a given output * buffer index. * * The key is simply the partition index. 
*/ struct split_key_functor { int num_columns; int operator() __device__(int buf_index) { return buf_index / num_columns; } }; /** * @brief Output iterator for writing values to the dst_offset field of the * dst_buf_info struct */ struct dst_offset_output_iterator { dst_buf_info* c; using value_type = std::size_t; using difference_type = std::size_t; using pointer = std::size_t*; using reference = std::size_t&; using iterator_category = thrust::output_device_iterator_tag; dst_offset_output_iterator operator+ __host__ __device__(int i) { return {c + i}; } void operator++ __host__ __device__() { c++; } reference operator[] __device__(int i) { return dereference(c + i); } reference operator* __device__() { return dereference(c); } private: reference __device__ dereference(dst_buf_info* c) { return c->dst_offset; } }; /** * @brief Output iterator for writing values to the valid_count field of the * dst_buf_info struct */ struct dst_valid_count_output_iterator { dst_buf_info* c; using value_type = size_type; using difference_type = size_type; using pointer = size_type*; using reference = size_type&; using iterator_category = thrust::output_device_iterator_tag; dst_valid_count_output_iterator operator+ __host__ __device__(int i) { return dst_valid_count_output_iterator{c + i}; } void operator++ __host__ __device__() { c++; } reference operator[] __device__(int i) { return dereference(c + i); } reference operator* __device__() { return dereference(c); } private: reference __device__ dereference(dst_buf_info* c) { return c->valid_count; } }; /** * @brief Functor for computing size of data elements for a given cudf type. * * Note: columns types which themselves inherently have no data (strings, lists, * structs) return 0. */ struct size_of_helper { template <typename T> constexpr std::enable_if_t<not is_fixed_width<T>(), int> __device__ operator()() const { return 0; } template <typename T> constexpr std::enable_if_t<is_fixed_width<T>(), int> __device__ operator()() const noexcept { return sizeof(cudf::device_storage_type_t<T>); } }; /** * @brief Functor for returning the number of chunks an input buffer is being * subdivided into during the repartitioning step. * * Note: columns types which themselves inherently have no data (strings, lists, * structs) return 0. */ struct num_chunks_func { thrust::pair<std::size_t, std::size_t> const* chunks; __device__ std::size_t operator()(size_type i) const { return thrust::get<0>(chunks[i]); } }; void copy_data(int num_bufs, int num_src_bufs, uint8_t const** d_src_bufs, uint8_t** d_dst_bufs, dst_buf_info* _d_dst_buf_info, rmm::cuda_stream_view stream) { // Since we parallelize at one block per copy, we are vulnerable to situations where we // have small numbers of copies to do (a combination of small numbers of splits and/or columns), // so we will take the actual set of outgoing source/destination buffers and further partition // them into much smaller chunks in order to drive up the number of blocks and overall occupancy. 
auto const desired_chunk_size = std::size_t{1 * 1024 * 1024}; rmm::device_uvector<thrust::pair<std::size_t, std::size_t>> chunks(num_bufs, stream); thrust::transform( rmm::exec_policy(stream), _d_dst_buf_info, _d_dst_buf_info + num_bufs, chunks.begin(), [desired_chunk_size] __device__( dst_buf_info const& buf) -> thrust::pair<std::size_t, std::size_t> { // Total bytes for this incoming partition std::size_t const bytes = static_cast<std::size_t>(buf.num_elements) * static_cast<std::size_t>(buf.element_size); // This clause handles nested data types (e.g. list or string) that store no data in the row // columns, only in their children. if (bytes == 0) { return {1, 0}; } // The number of chunks we want to subdivide this buffer into std::size_t const num_chunks = max(std::size_t{1}, util::round_up_unsafe(bytes, desired_chunk_size) / desired_chunk_size); // NOTE: leaving chunk size as a separate parameter for future tuning // possibilities, even though in the current implementation it will be a // constant. return {num_chunks, desired_chunk_size}; }); rmm::device_uvector<offset_type> chunk_offsets(num_bufs + 1, stream); auto buf_count_iter = cudf::detail::make_counting_transform_iterator( 0, [num_bufs, num_chunks = num_chunks_func{chunks.begin()}] __device__(size_type i) { return i == num_bufs ? 0 : num_chunks(i); }); thrust::exclusive_scan(rmm::exec_policy(stream), buf_count_iter, buf_count_iter + num_bufs + 1, chunk_offsets.begin(), 0); auto out_to_in_index = [chunk_offsets = chunk_offsets.begin(), num_bufs] __device__(size_type i) { return static_cast<size_type>( thrust::upper_bound(thrust::seq, chunk_offsets, chunk_offsets + num_bufs + 1, i) - chunk_offsets) - 1; }; // apply the chunking. auto const num_chunks = cudf::detail::make_counting_transform_iterator(0, num_chunks_func{chunks.begin()}); size_type const new_buf_count = thrust::reduce(rmm::exec_policy(stream), num_chunks, num_chunks + chunks.size()); rmm::device_uvector<dst_buf_info> d_dst_buf_info(new_buf_count, stream); auto iter = thrust::make_counting_iterator(0); thrust::for_each( rmm::exec_policy(stream), iter, iter + new_buf_count, [_d_dst_buf_info, d_dst_buf_info = d_dst_buf_info.begin(), chunks = chunks.begin(), chunk_offsets = chunk_offsets.begin(), num_bufs, num_src_bufs, out_to_in_index] __device__(size_type i) { size_type const in_buf_index = out_to_in_index(i); size_type const chunk_index = i - chunk_offsets[in_buf_index]; auto const chunk_size = thrust::get<1>(chunks[in_buf_index]); dst_buf_info const& in = _d_dst_buf_info[in_buf_index]; // adjust info dst_buf_info& out = d_dst_buf_info[i]; out.element_size = in.element_size; out.value_shift = in.value_shift; out.bit_shift = in.bit_shift; out.valid_count = in.valid_count; // valid count will be set to 1 if this is a validity buffer out.src_buf_index = in.src_buf_index; out.dst_buf_index = in.dst_buf_index; size_type const elements_per_chunk = out.element_size == 0 ? 0 : chunk_size / out.element_size; out.num_elements = ((chunk_index + 1) * elements_per_chunk) > in.num_elements ? in.num_elements - (chunk_index * elements_per_chunk) : elements_per_chunk; size_type const rows_per_chunk = // if this is a validity buffer, each element is a bitmask_type, which // corresponds to 32 rows. out.valid_count > 0 ? elements_per_chunk * static_cast<size_type>(detail::size_in_bits<bitmask_type>()) : elements_per_chunk; out.num_rows = ((chunk_index + 1) * rows_per_chunk) > in.num_rows ? 
in.num_rows - (chunk_index * rows_per_chunk) : rows_per_chunk; out.src_element_index = in.src_element_index + (chunk_index * elements_per_chunk); out.dst_offset = in.dst_offset + (chunk_index * chunk_size); // out.bytes and out.buf_size are unneeded here because they are only used to // calculate real output buffer sizes. the data we are generating here is // purely intermediate for the purposes of doing more uniform copying of data // underneath the final structure of the output }); // perform the copy constexpr size_type block_size = 256; hipLaunchKernelGGL(( copy_partitions<block_size>), dim3(new_buf_count), dim3(block_size), 0, stream.value(), d_src_bufs, d_dst_bufs, d_dst_buf_info.data()); // postprocess valid_counts auto keys = cudf::detail::make_counting_transform_iterator( 0, [out_to_in_index] __device__(size_type i) { return out_to_in_index(i); }); auto values = thrust::make_transform_iterator( d_dst_buf_info.begin(), [] __device__(dst_buf_info const& info) { return info.valid_count; }); thrust::reduce_by_key(rmm::exec_policy(stream), keys, keys + new_buf_count, values, thrust::make_discard_iterator(), dst_valid_count_output_iterator{_d_dst_buf_info}); } }; // anonymous namespace namespace detail { std::vector<packed_table> contiguous_split(cudf::table_view const& input, std::vector<size_type> const& splits, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (input.num_columns() == 0) { return {}; } if (splits.size() > 0) { CUDF_EXPECTS(splits.back() <= input.column(0).size(), "splits can't exceed size of input columns"); } { size_type begin = 0; for (std::size_t i = 0; i < splits.size(); i++) { size_type end = splits[i]; CUDF_EXPECTS(begin >= 0, "Starting index cannot be negative."); CUDF_EXPECTS(end >= begin, "End index cannot be smaller than the starting index."); CUDF_EXPECTS(end <= input.column(0).size(), "Slice range out of bounds."); begin = end; } } std::size_t const num_partitions = splits.size() + 1; std::size_t const num_root_columns = input.num_columns(); // if inputs are empty, just return num_partitions empty tables if (input.column(0).size() == 0) { // sanitize the inputs (to handle corner cases like sliced tables) std::vector<std::unique_ptr<column>> empty_columns; empty_columns.reserve(input.num_columns()); std::transform( input.begin(), input.end(), std::back_inserter(empty_columns), [](column_view const& col) { return cudf::empty_like(col); }); std::vector<cudf::column_view> empty_column_views; empty_column_views.reserve(input.num_columns()); std::transform(empty_columns.begin(), empty_columns.end(), std::back_inserter(empty_column_views), [](std::unique_ptr<column> const& col) { return col->view(); }); table_view empty_inputs(empty_column_views); // build the empty results std::vector<packed_table> result; result.reserve(num_partitions); auto iter = thrust::make_counting_iterator(0); std::transform(iter, iter + num_partitions, std::back_inserter(result), [&empty_inputs](int partition_index) { return packed_table{ empty_inputs, packed_columns{std::make_unique<packed_columns::metadata>(pack_metadata( empty_inputs, static_cast<uint8_t const*>(nullptr), 0)), std::make_unique<rmm::device_buffer>()}}; }); return result; } // compute # of source buffers (column data, validity, children), # of partitions // and total # of buffers size_type const num_src_bufs = count_src_bufs(input.begin(), input.end()); std::size_t const num_bufs = num_src_bufs * num_partitions; // packed block of memory 1. 
split indices and src_buf_info structs std::size_t const indices_size = cudf::util::round_up_safe((num_partitions + 1) * sizeof(size_type), split_align); std::size_t const src_buf_info_size = cudf::util::round_up_safe(num_src_bufs * sizeof(src_buf_info), split_align); // host-side std::vector<uint8_t> h_indices_and_source_info(indices_size + src_buf_info_size); size_type* h_indices = reinterpret_cast<size_type*>(h_indices_and_source_info.data()); src_buf_info* h_src_buf_info = reinterpret_cast<src_buf_info*>(h_indices_and_source_info.data() + indices_size); // device-side // gpu-only : stack space needed for nested list offset calculation int const offset_stack_partition_size = compute_offset_stack_size(input.begin(), input.end()); std::size_t const offset_stack_size = offset_stack_partition_size * num_partitions * sizeof(size_type); rmm::device_buffer d_indices_and_source_info(indices_size + src_buf_info_size + offset_stack_size, stream, rmm::mr::get_current_device_resource()); auto* d_indices = reinterpret_cast<size_type*>(d_indices_and_source_info.data()); src_buf_info* d_src_buf_info = reinterpret_cast<src_buf_info*>( reinterpret_cast<uint8_t*>(d_indices_and_source_info.data()) + indices_size); size_type* d_offset_stack = reinterpret_cast<size_type*>(reinterpret_cast<uint8_t*>(d_indices_and_source_info.data()) + indices_size + src_buf_info_size); // compute splits -> indices. h_indices[0] = 0; h_indices[num_partitions] = input.column(0).size(); std::copy(splits.begin(), splits.end(), std::next(h_indices)); // setup source buf info setup_source_buf_info(input.begin(), input.end(), h_src_buf_info, h_src_buf_info); // HtoD indices and source buf info to device CUDF_CUDA_TRY(hipMemcpyAsync(d_indices, h_indices, indices_size + src_buf_info_size, hipMemcpyHostToDevice, stream.value())); // packed block of memory 2. partition buffer sizes and dst_buf_info structs std::size_t const buf_sizes_size = cudf::util::round_up_safe(num_partitions * sizeof(std::size_t), split_align); std::size_t const dst_buf_info_size = cudf::util::round_up_safe(num_bufs * sizeof(dst_buf_info), split_align); // host-side std::vector<uint8_t> h_buf_sizes_and_dst_info(buf_sizes_size + dst_buf_info_size); std::size_t* h_buf_sizes = reinterpret_cast<std::size_t*>(h_buf_sizes_and_dst_info.data()); dst_buf_info* h_dst_buf_info = reinterpret_cast<dst_buf_info*>(h_buf_sizes_and_dst_info.data() + buf_sizes_size); // device-side rmm::device_buffer d_buf_sizes_and_dst_info( buf_sizes_size + dst_buf_info_size, stream, rmm::mr::get_current_device_resource()); std::size_t* d_buf_sizes = reinterpret_cast<std::size_t*>(d_buf_sizes_and_dst_info.data()); dst_buf_info* d_dst_buf_info = reinterpret_cast<dst_buf_info*>( static_cast<uint8_t*>(d_buf_sizes_and_dst_info.data()) + buf_sizes_size); // compute sizes of each column in each partition, including alignment. thrust::transform( rmm::exec_policy(stream), thrust::make_counting_iterator<std::size_t>(0), thrust::make_counting_iterator<std::size_t>(num_bufs), d_dst_buf_info, [num_src_bufs, d_indices, d_src_buf_info, d_offset_stack, offset_stack_partition_size] __device__(std::size_t t) { int const split_index = t / num_src_bufs; int const src_buf_index = t % num_src_bufs; auto const& src_info = d_src_buf_info[src_buf_index]; // apply nested offsets (lists and string columns). // // We can't just use the incoming row indices to figure out where to read from in a // nested list situation. We have to apply offsets every time we cross a boundary // (list or string). 
This loop applies those offsets so that our incoming row_index_start // and row_index_end get transformed to our final values. // int const stack_pos = src_info.offset_stack_pos + (split_index * offset_stack_partition_size); size_type* offset_stack = &d_offset_stack[stack_pos]; int parent_offsets_index = src_info.parent_offsets_index; int stack_size = 0; int root_column_offset = src_info.column_offset; while (parent_offsets_index >= 0) { offset_stack[stack_size++] = parent_offsets_index; root_column_offset = d_src_buf_info[parent_offsets_index].column_offset; parent_offsets_index = d_src_buf_info[parent_offsets_index].parent_offsets_index; } // make sure to include the -column- offset on the root column in our calculation. int row_start = d_indices[split_index] + root_column_offset; int row_end = d_indices[split_index + 1] + root_column_offset; while (stack_size > 0) { stack_size--; auto const offsets = d_src_buf_info[offset_stack[stack_size]].offsets; // this case can happen when you have empty string or list columns constructed with // empty_like() if (offsets != nullptr) { row_start = offsets[row_start]; row_end = offsets[row_end]; } } // final element indices and row count int const out_element_index = src_info.is_validity ? row_start / 32 : row_start; int const num_rows = row_end - row_start; // if I am an offsets column, all my values need to be shifted int const value_shift = src_info.offsets == nullptr ? 0 : src_info.offsets[row_start]; // if I am a validity column, we may need to shift bits int const bit_shift = src_info.is_validity ? row_start % 32 : 0; // # of rows isn't necessarily the same as # of elements to be copied. auto const num_elements = [&]() { if (src_info.offsets != nullptr && num_rows > 0) { return num_rows + 1; } else if (src_info.is_validity) { return (num_rows + 31) / 32; } return num_rows; }(); int const element_size = cudf::type_dispatcher(data_type{src_info.type}, size_of_helper{}); std::size_t const bytes = static_cast<std::size_t>(num_elements) * static_cast<std::size_t>(element_size); return dst_buf_info{util::round_up_unsafe(bytes, split_align), num_elements, element_size, num_rows, out_element_index, 0, value_shift, bit_shift, src_info.is_validity ? 
1 : 0, src_buf_index, split_index}; }); // compute total size of each partition { // key is split index auto keys = cudf::detail::make_counting_transform_iterator( 0, split_key_functor{static_cast<int>(num_src_bufs)}); auto values = cudf::detail::make_counting_transform_iterator(0, buf_size_functor{d_dst_buf_info}); thrust::reduce_by_key(rmm::exec_policy(stream), keys, keys + num_bufs, values, thrust::make_discard_iterator(), d_buf_sizes); } // compute start offset for each output buffer { auto keys = cudf::detail::make_counting_transform_iterator( 0, split_key_functor{static_cast<int>(num_src_bufs)}); auto values = cudf::detail::make_counting_transform_iterator(0, buf_size_functor{d_dst_buf_info}); thrust::exclusive_scan_by_key(rmm::exec_policy(stream), keys, keys + num_bufs, values, dst_offset_output_iterator{d_dst_buf_info}, std::size_t{0}); } // DtoH buf sizes and col info back to the host CUDF_CUDA_TRY(hipMemcpyAsync(h_buf_sizes, d_buf_sizes, buf_sizes_size + dst_buf_info_size, hipMemcpyDeviceToHost, stream.value())); stream.synchronize(); // allocate output partition buffers std::vector<rmm::device_buffer> out_buffers; out_buffers.reserve(num_partitions); std::transform(h_buf_sizes, h_buf_sizes + num_partitions, std::back_inserter(out_buffers), [stream, mr](std::size_t bytes) { return rmm::device_buffer{bytes, stream, mr}; }); // packed block of memory 3. pointers to source and destination buffers (and stack space on the // gpu for offset computation) std::size_t const src_bufs_size = cudf::util::round_up_safe(num_src_bufs * sizeof(uint8_t*), split_align); std::size_t const dst_bufs_size = cudf::util::round_up_safe(num_partitions * sizeof(uint8_t*), split_align); // host-side std::vector<uint8_t> h_src_and_dst_buffers(src_bufs_size + dst_bufs_size); uint8_t const** h_src_bufs = reinterpret_cast<uint8_t const**>(h_src_and_dst_buffers.data()); uint8_t** h_dst_bufs = reinterpret_cast<uint8_t**>(h_src_and_dst_buffers.data() + src_bufs_size); // device-side rmm::device_buffer d_src_and_dst_buffers(src_bufs_size + dst_bufs_size + offset_stack_size, stream, rmm::mr::get_current_device_resource()); auto const** d_src_bufs = reinterpret_cast<uint8_t const**>(d_src_and_dst_buffers.data()); uint8_t** d_dst_bufs = reinterpret_cast<uint8_t**>( reinterpret_cast<uint8_t*>(d_src_and_dst_buffers.data()) + src_bufs_size); // setup src buffers setup_src_buf_data(input.begin(), input.end(), h_src_bufs); // setup dst buffers std::transform(out_buffers.begin(), out_buffers.end(), h_dst_bufs, [](auto& buf) { return static_cast<uint8_t*>(buf.data()); }); // HtoD src and dest buffers CUDF_CUDA_TRY(hipMemcpyAsync( d_src_bufs, h_src_bufs, src_bufs_size + dst_bufs_size, hipMemcpyHostToDevice, stream.value())); // perform the copy. copy_data(num_bufs, num_src_bufs, d_src_bufs, d_dst_bufs, d_dst_buf_info, stream); // DtoH dst info (to retrieve null counts) CUDF_CUDA_TRY(hipMemcpyAsync( h_dst_buf_info, d_dst_buf_info, dst_buf_info_size, hipMemcpyDeviceToHost, stream.value())); stream.synchronize(); // build the output. std::vector<packed_table> result; result.reserve(num_partitions); std::vector<column_view> cols; cols.reserve(num_root_columns); auto cur_dst_buf_info = h_dst_buf_info; for (std::size_t idx = 0; idx < num_partitions; idx++) { // traverse the buffers and build the columns. 
cur_dst_buf_info = build_output_columns( input.begin(), input.end(), cur_dst_buf_info, std::back_inserter(cols), h_dst_bufs[idx]); // pack the columns cudf::table_view t{cols}; result.push_back(packed_table{ t, packed_columns{ std::make_unique<packed_columns::metadata>(cudf::pack_metadata( t, reinterpret_cast<uint8_t const*>(out_buffers[idx].data()), out_buffers[idx].size())), std::make_unique<rmm::device_buffer>(std::move(out_buffers[idx]))}}); cols.clear(); } return result; } }; // namespace detail std::vector<packed_table> contiguous_split(cudf::table_view const& input, std::vector<size_type> const& splits, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::contiguous_split(input, splits, cudf::get_default_stream(), mr); } }; // namespace cudf
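// ---------------------------------------------------------------------------
// Editor's note: the sketch below is NOT part of the original file. It is a
// minimal, hedged usage example of cudf::contiguous_split added for
// illustration only. It assumes the public cudf API as declared in
// <cudf/copying.hpp> and <cudf/column/column_factories.hpp>; the helper name
// contiguous_split_usage_sketch() is purely illustrative.
#include <cudf/column/column_factories.hpp>

inline void contiguous_split_usage_sketch()
{
  // A 6-row INT32 column (data left uninitialized here; a real caller would
  // fill it or build the column from host data first).
  auto col = cudf::make_numeric_column(cudf::data_type{cudf::type_id::INT32}, 6);
  cudf::table_view input({col->view()});

  // Split points after rows 2 and 4 produce three partitions: [0,2), [2,4), [4,6).
  std::vector<cudf::size_type> splits{2, 4};
  auto partitions =
    cudf::contiguous_split(input, splits, rmm::mr::get_current_device_resource());

  // Each packed_table pairs a non-owning table_view with the packed
  // (contiguous) device allocation and metadata needed to reconstruct the
  // partition elsewhere.
  for (auto const& p : partitions) {
    auto const rows = p.table.num_rows();  // 2 rows per partition in this example
    (void)rows;
  }
}
// ---------------------------------------------------------------------------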
0c5981e9672d7a8628d626340cab3a05e8eea7b0.cu
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/lists/lists_column_view.hpp> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/table_view.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/default_stream.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/iterator_categories.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/pair.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/transform.h> #include <thrust/tuple.h> #include <cstddef> #include <numeric> namespace cudf { namespace { // align all column size allocations to this boundary so that all output column buffers // start at that alignment. static constexpr std::size_t split_align = 64; /** * @brief Struct which contains information on a source buffer. * * The definition of "buffer" used throughout this module is a component piece of a * cudf column. So for example, a fixed-width column with validity would have 2 associated * buffers : the data itself and the validity buffer. contiguous_split operates by breaking * each column up into it's individual components and copying each one as a separate kernel * block. */ struct src_buf_info { src_buf_info(cudf::type_id _type, const int* _offsets, int _offset_stack_pos, int _parent_offsets_index, bool _is_validity, size_type _column_offset) : type(_type), offsets(_offsets), offset_stack_pos(_offset_stack_pos), parent_offsets_index(_parent_offsets_index), is_validity(_is_validity), column_offset(_column_offset) { } cudf::type_id type; const int* offsets; // a pointer to device memory offsets if I am an offset buffer int offset_stack_pos; // position in the offset stack buffer int parent_offsets_index; // immediate parent that has offsets, or -1 if none bool is_validity; // if I am a validity buffer size_type column_offset; // offset in the case of a sliced column }; /** * @brief Struct which contains information on a destination buffer. * * Similar to src_buf_info, dst_buf_info contains information on a destination buffer we * are going to copy to. If we have N input buffers (which come from X columns), and * M partitions, then we have N*M destination buffers. 
*/ struct dst_buf_info { // constant across all copy commands for this buffer std::size_t buf_size; // total size of buffer, including padding int num_elements; // # of elements to be copied int element_size; // size of each element in bytes int num_rows; // # of rows to be copied(which may be different from num_elements in the case of // validity or offset buffers) int src_element_index; // element index to start reading from from my associated source buffer std::size_t dst_offset; // my offset into the per-partition allocation int value_shift; // amount to shift values down by (for offset buffers) int bit_shift; // # of bits to shift right by (for validity buffers) size_type valid_count; // validity count for this block of work int src_buf_index; // source buffer index int dst_buf_index; // destination buffer index }; /** * @brief Copy a single buffer of column data, shifting values (for offset columns), * and validity (for validity buffers) as necessary. * * Copies a single partition of a source column buffer to a destination buffer. Shifts * element values by value_shift in the case of a buffer of offsets (value_shift will * only ever be > 0 in that case). Shifts elements bitwise by bit_shift in the case of * a validity buffer (bif_shift will only ever be > 0 in that case). This function assumes * value_shift and bit_shift will never be > 0 at the same time. * * This function expects: * - src may be a misaligned address * - dst must be an aligned address * * This function always does the ALU work related to value_shift and bit_shift because it is * entirely memory-bandwidth bound. * * @param dst Destination buffer * @param src Source buffer * @param t Thread index * @param num_elements Number of elements to copy * @param element_size Size of each element in bytes * @param src_element_index Element index to start copying at * @param stride Size of the kernel block * @param value_shift Shift incoming 4-byte offset values down by this amount * @param bit_shift Shift incoming data right by this many bits * @param num_rows Number of rows being copied * @param valid_count Optional pointer to a value to store count of set bits */ template <int block_size> __device__ void copy_buffer(uint8_t* __restrict__ dst, uint8_t const* __restrict__ src, int t, std::size_t num_elements, std::size_t element_size, std::size_t src_element_index, uint32_t stride, int value_shift, int bit_shift, std::size_t num_rows, size_type* valid_count) { src += (src_element_index * element_size); size_type thread_valid_count = 0; // handle misalignment. read 16 bytes in 4 byte reads. write in a single 16 byte store. std::size_t const num_bytes = num_elements * element_size; // how many bytes we're misaligned from 4-byte alignment uint32_t const ofs = reinterpret_cast<uintptr_t>(src) % 4; std::size_t pos = t * 16; stride *= 16; while (pos + 20 <= num_bytes) { // read from the nearest aligned address. 
const uint32_t* in32 = reinterpret_cast<const uint32_t*>((src + pos) - ofs); uint4 v = uint4{in32[0], in32[1], in32[2], in32[3]}; if (ofs || bit_shift) { v.x = __funnelshift_r(v.x, v.y, ofs * 8 + bit_shift); v.y = __funnelshift_r(v.y, v.z, ofs * 8 + bit_shift); v.z = __funnelshift_r(v.z, v.w, ofs * 8 + bit_shift); v.w = __funnelshift_r(v.w, in32[4], ofs * 8 + bit_shift); } v.x -= value_shift; v.y -= value_shift; v.z -= value_shift; v.w -= value_shift; reinterpret_cast<uint4*>(dst)[pos / 16] = v; if (valid_count) { thread_valid_count += (__popc(v.x) + __popc(v.y) + __popc(v.z) + __popc(v.w)); } pos += stride; } // copy trailing bytes if (t == 0) { std::size_t remainder; if (num_bytes < 16) { remainder = num_bytes; } else { std::size_t const last_bracket = (num_bytes / 16) * 16; remainder = num_bytes - last_bracket; if (remainder < 4) { // we had less than 20 bytes for the last possible 16 byte copy, so copy 16 + the extra remainder += 16; } } // if we're performing a value shift (offsets), or a bit shift (validity) the # of bytes and // alignment must be a multiple of 4. value shifting and bit shifting are mutually exclusive // and will never both be true at the same time. if (value_shift || bit_shift) { std::size_t idx = (num_bytes - remainder) / 4; uint32_t v = remainder > 0 ? (reinterpret_cast<uint32_t const*>(src)[idx] - value_shift) : 0; constexpr size_type rows_per_element = 32; auto const have_trailing_bits = ((num_elements * rows_per_element) - num_rows) < bit_shift; while (remainder) { // if we're at the very last word of a validity copy, we do not always need to read the next // word to get the final trailing bits. auto const read_trailing_bits = bit_shift > 0 && remainder == 4 && have_trailing_bits; uint32_t const next = (read_trailing_bits || remainder > 4) ? (reinterpret_cast<uint32_t const*>(src)[idx + 1] - value_shift) : 0; uint32_t const val = (v >> bit_shift) | (next << (32 - bit_shift)); if (valid_count) { thread_valid_count += __popc(val); } reinterpret_cast<uint32_t*>(dst)[idx] = val; v = next; idx++; remainder -= 4; } } else { while (remainder) { std::size_t const idx = num_bytes - remainder--; uint32_t const val = reinterpret_cast<uint8_t const*>(src)[idx]; if (valid_count) { thread_valid_count += __popc(val); } reinterpret_cast<uint8_t*>(dst)[idx] = val; } } } if (valid_count) { if (num_bytes == 0) { if (!t) { *valid_count = 0; } } else { using BlockReduce = cub::BlockReduce<size_type, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; size_type block_valid_count{BlockReduce(temp_storage).Sum(thread_valid_count)}; if (!t) { // we may have copied more bits than there are actual rows in the output. // so we need to subtract off the count of any bits that shouldn't have been // considered during the copy step. std::size_t const max_row = (num_bytes * 8); std::size_t const slack_bits = max_row > num_rows ? max_row - num_rows : 0; auto const slack_mask = set_most_significant_bits(slack_bits); if (slack_mask > 0) { uint32_t const last_word = reinterpret_cast<uint32_t*>(dst + (num_bytes - 4))[0]; block_valid_count -= __popc(last_word & slack_mask); } *valid_count = block_valid_count; } } } } /** * @brief Kernel which copies data from multiple source buffers to multiple * destination buffers. * * When doing a contiguous_split on X columns comprising N total internal buffers * with M splits, we end up having to copy N*M source/destination buffer pairs. 
* These logical copies are further subdivided to distribute the amount of work * to be done as evenly as possible across the multiprocessors on the device. * This kernel is arranged such that each block copies 1 source/destination pair. * * @param src_bufs Input source buffers * @param dst_bufs Destination buffers * @param buf_info Information on the range of values to be copied for each destination buffer. */ template <int block_size> __global__ void copy_partitions(uint8_t const** src_bufs, uint8_t** dst_bufs, dst_buf_info* buf_info) { auto const buf_index = blockIdx.x; auto const src_buf_index = buf_info[buf_index].src_buf_index; auto const dst_buf_index = buf_info[buf_index].dst_buf_index; // copy, shifting offsets and validity bits as needed copy_buffer<block_size>( dst_bufs[dst_buf_index] + buf_info[buf_index].dst_offset, src_bufs[src_buf_index], threadIdx.x, buf_info[buf_index].num_elements, buf_info[buf_index].element_size, buf_info[buf_index].src_element_index, blockDim.x, buf_info[buf_index].value_shift, buf_info[buf_index].bit_shift, buf_info[buf_index].num_rows, buf_info[buf_index].valid_count > 0 ? &buf_info[buf_index].valid_count : nullptr); } // The block of functions below are all related: // // compute_offset_stack_size() // setup_src_buf_data() // count_src_bufs() // setup_source_buf_info() // build_output_columns() // // Critically, they all traverse the hierarchy of source columns and their children // in a specific order to guarantee they produce various outputs in a consistent // way. For example, setup_src_buf_info() produces a series of information // structs that must appear in the same order that setup_src_buf_data() produces // buffers. // // So please be careful if you change the way in which these functions and // functors traverse the hierarchy. /** * @brief Returns whether or not the specified type is a column that contains offsets. */ bool is_offset_type(type_id id) { return (id == type_id::STRING or id == type_id::LIST); } /** * @brief Compute total device memory stack size needed to process nested * offsets per-output buffer. * * When determining the range of rows to be copied for each output buffer * we have to recursively apply the stack of offsets from our parent columns * (lists or strings). We want to do this computation on the gpu because offsets * are stored in device memory. However we don't want to do recursion on the gpu, so * each destination buffer gets a "stack" of space to work with equal in size to * it's offset nesting depth. This function computes the total size of all of those * stacks. * * This function is called recursively in the case of nested types. * * @param begin Beginning of input columns * @param end End of input columns * @param offset_depth Current offset nesting depth * * @returns Total offset stack size needed for this range of columns. */ template <typename InputIter> std::size_t compute_offset_stack_size(InputIter begin, InputIter end, int offset_depth = 0) { return std::accumulate(begin, end, 0, [offset_depth](auto stack_size, column_view const& col) { auto const num_buffers = 1 + (col.nullable() ? 1 : 0); return stack_size + (offset_depth * num_buffers) + compute_offset_stack_size( col.child_begin(), col.child_end(), offset_depth + is_offset_type(col.type().id())); }); } /** * @brief Retrieve all buffers for a range of source columns. * * Retrieve the individual buffers that make up a range of input columns. * * This function is called recursively in the case of nested types. 
* * @param begin Beginning of input columns * @param end End of input columns * @param out_buf Iterator into output buffer infos * * @returns next output buffer iterator */ template <typename InputIter, typename OutputIter> OutputIter setup_src_buf_data(InputIter begin, InputIter end, OutputIter out_buf) { std::for_each(begin, end, [&out_buf](column_view const& col) { if (col.nullable()) { *out_buf = reinterpret_cast<uint8_t const*>(col.null_mask()); out_buf++; } // NOTE: we're always returning the base pointer here. column-level offset is accounted // for later. Also, for some column types (string, list, struct) this pointer will be null // because there is no associated data with the root column. *out_buf = col.head<uint8_t>(); out_buf++; out_buf = setup_src_buf_data(col.child_begin(), col.child_end(), out_buf); }); return out_buf; } /** * @brief Count the total number of source buffers we will be copying * from. * * This count includes buffers for all input columns. For example a * fixed-width column with validity would be 2 buffers (data, validity). * A string column with validity would be 3 buffers (chars, offsets, validity). * * This function is called recursively in the case of nested types. * * @param begin Beginning of input columns * @param end End of input columns * * @returns total number of source buffers for this range of columns */ template <typename InputIter> size_type count_src_bufs(InputIter begin, InputIter end) { auto buf_iter = thrust::make_transform_iterator(begin, [](column_view const& col) { return 1 + (col.nullable() ? 1 : 0) + count_src_bufs(col.child_begin(), col.child_end()); }); return std::accumulate(buf_iter, buf_iter + std::distance(begin, end), 0); } /** * @brief Computes source buffer information for the copy kernel. * * For each input column to be split we need to know several pieces of information * in the copy kernel. This function traverses the input columns and prepares this * information for the gpu. * * This function is called recursively in the case of nested types. * * @param begin Beginning of input columns * @param end End of input columns * @param head Beginning of source buffer info array * @param current Current source buffer info to be written to * @param offset_stack_pos Integer representing our current offset nesting depth * (how many list or string levels deep we are) * @param parent_offset_index Index into src_buf_info output array indicating our nearest * containing list parent. -1 if we have no list parent * @param offset_depth Current offset nesting depth (how many list levels deep we are) * * @returns next src_buf_output after processing this range of input columns */ // setup source buf info template <typename InputIter> std::pair<src_buf_info*, size_type> setup_source_buf_info(InputIter begin, InputIter end, src_buf_info* head, src_buf_info* current, int offset_stack_pos = 0, int parent_offset_index = -1, int offset_depth = 0); /** * @brief Functor that builds source buffer information based on input columns. * * Called by setup_source_buf_info to build information for a single source column. This function * will recursively call setup_source_buf_info in the case of nested types. 
*/ struct buf_info_functor { src_buf_info* head; template <typename T> std::pair<src_buf_info*, size_type> operator()(column_view const& col, src_buf_info* current, int offset_stack_pos, int parent_offset_index, int offset_depth) { if (col.nullable()) { std::tie(current, offset_stack_pos) = add_null_buffer(col, current, offset_stack_pos, parent_offset_index, offset_depth); } // info for the data buffer *current = src_buf_info( col.type().id(), nullptr, offset_stack_pos, parent_offset_index, false, col.offset()); return {current + 1, offset_stack_pos + offset_depth}; } template <typename T, typename... Args> std::enable_if_t<std::is_same_v<T, cudf::dictionary32>, std::pair<src_buf_info*, size_type>> operator()(Args&&...) { CUDF_FAIL("Unsupported type"); } private: std::pair<src_buf_info*, size_type> add_null_buffer(column_view const& col, src_buf_info* current, int offset_stack_pos, int parent_offset_index, int offset_depth) { // info for the validity buffer *current = src_buf_info( type_id::INT32, nullptr, offset_stack_pos, parent_offset_index, true, col.offset()); return {current + 1, offset_stack_pos + offset_depth}; } }; template <> std::pair<src_buf_info*, size_type> buf_info_functor::operator()<cudf::string_view>( column_view const& col, src_buf_info* current, int offset_stack_pos, int parent_offset_index, int offset_depth) { if (col.nullable()) { std::tie(current, offset_stack_pos) = add_null_buffer(col, current, offset_stack_pos, parent_offset_index, offset_depth); } // string columns hold no actual data, but we need to keep a record // of it so we know it's size when we are constructing the output columns *current = src_buf_info( type_id::STRING, nullptr, offset_stack_pos, parent_offset_index, false, col.offset()); current++; offset_stack_pos += offset_depth; // string columns don't necessarily have children if (col.num_children() > 0) { CUDF_EXPECTS(col.num_children() == 2, "Encountered malformed string column"); strings_column_view scv(col); // info for the offsets buffer auto offset_col = current; CUDF_EXPECTS(not scv.offsets().nullable(), "Encountered nullable string offsets column"); *current = src_buf_info(type_id::INT32, // note: offsets can be null in the case where the string column // has been created with empty_like(). scv.offsets().begin<cudf::id_to_type<type_id::INT32>>(), offset_stack_pos, parent_offset_index, false, col.offset()); current++; offset_stack_pos += offset_depth; // since we are crossing an offset boundary, calculate our new depth and parent offset index. 
offset_depth++; parent_offset_index = offset_col - head; // prevent appending buf_info for non-existent chars buffer CUDF_EXPECTS(not scv.chars().nullable(), "Encountered nullable string chars column"); // info for the chars buffer *current = src_buf_info( type_id::INT8, nullptr, offset_stack_pos, parent_offset_index, false, col.offset()); current++; offset_stack_pos += offset_depth; } return {current, offset_stack_pos}; } template <> std::pair<src_buf_info*, size_type> buf_info_functor::operator()<cudf::list_view>( column_view const& col, src_buf_info* current, int offset_stack_pos, int parent_offset_index, int offset_depth) { lists_column_view lcv(col); if (col.nullable()) { std::tie(current, offset_stack_pos) = add_null_buffer(col, current, offset_stack_pos, parent_offset_index, offset_depth); } // list columns hold no actual data, but we need to keep a record // of it so we know it's size when we are constructing the output columns *current = src_buf_info( type_id::LIST, nullptr, offset_stack_pos, parent_offset_index, false, col.offset()); current++; offset_stack_pos += offset_depth; CUDF_EXPECTS(col.num_children() == 2, "Encountered malformed list column"); // info for the offsets buffer auto offset_col = current; *current = src_buf_info(type_id::INT32, // note: offsets can be null in the case where the lists column // has been created with empty_like(). lcv.offsets().begin<cudf::id_to_type<type_id::INT32>>(), offset_stack_pos, parent_offset_index, false, col.offset()); current++; offset_stack_pos += offset_depth; // since we are crossing an offset boundary, calculate our new depth and parent offset index. offset_depth++; parent_offset_index = offset_col - head; return setup_source_buf_info(col.child_begin() + 1, col.child_end(), head, current, offset_stack_pos, parent_offset_index, offset_depth); } template <> std::pair<src_buf_info*, size_type> buf_info_functor::operator()<cudf::struct_view>( column_view const& col, src_buf_info* current, int offset_stack_pos, int parent_offset_index, int offset_depth) { if (col.nullable()) { std::tie(current, offset_stack_pos) = add_null_buffer(col, current, offset_stack_pos, parent_offset_index, offset_depth); } // struct columns hold no actual data, but we need to keep a record // of it so we know it's size when we are constructing the output columns *current = src_buf_info( type_id::STRUCT, nullptr, offset_stack_pos, parent_offset_index, false, col.offset()); current++; offset_stack_pos += offset_depth; // recurse on children cudf::structs_column_view scv(col); std::vector<column_view> sliced_children; sliced_children.reserve(scv.num_children()); std::transform(thrust::make_counting_iterator(0), thrust::make_counting_iterator(scv.num_children()), std::back_inserter(sliced_children), [&scv](size_type child_index) { return scv.get_sliced_child(child_index); }); return setup_source_buf_info(sliced_children.begin(), sliced_children.end(), head, current, offset_stack_pos, parent_offset_index, offset_depth); } template <typename InputIter> std::pair<src_buf_info*, size_type> setup_source_buf_info(InputIter begin, InputIter end, src_buf_info* head, src_buf_info* current, int offset_stack_pos, int parent_offset_index, int offset_depth) { std::for_each(begin, end, [&](column_view const& col) { std::tie(current, offset_stack_pos) = cudf::type_dispatcher(col.type(), buf_info_functor{head}, col, current, offset_stack_pos, parent_offset_index, offset_depth); }); return {current, offset_stack_pos}; } /** * @brief Given a set of input columns and processed 
split buffers, produce * output columns. * * After performing the split we are left with 1 large buffer per incoming split * partition. We need to traverse this buffer and distribute the individual * subpieces that represent individual columns and children to produce the final * output columns. * * This function is called recursively in the case of nested types. * * @param begin Beginning of input columns * @param end End of input columns * @param info_begin Iterator of dst_buf_info structs containing information about each * copied buffer * @param out_begin Output iterator of column views * @param base_ptr Pointer to the base address of copied data for the working partition * * @returns new dst_buf_info iterator after processing this range of input columns */ template <typename InputIter, typename BufInfo, typename Output> BufInfo build_output_columns(InputIter begin, InputIter end, BufInfo info_begin, Output out_begin, uint8_t const* const base_ptr) { auto current_info = info_begin; std::transform(begin, end, out_begin, [&current_info, base_ptr](column_view const& src) { auto [bitmask_ptr, null_count] = [&]() { if (src.nullable()) { auto const ptr = current_info->num_elements == 0 ? nullptr : reinterpret_cast<bitmask_type const*>(base_ptr + current_info->dst_offset); auto const null_count = current_info->num_elements == 0 ? 0 : (current_info->num_rows - current_info->valid_count); ++current_info; return std::pair(ptr, null_count); } return std::pair(static_cast<bitmask_type const*>(nullptr), 0); }(); // size/data pointer for the column auto const size = current_info->num_elements; uint8_t const* data_ptr = size == 0 || src.head() == nullptr ? nullptr : base_ptr + current_info->dst_offset; ++current_info; // children auto children = std::vector<column_view>{}; children.reserve(src.num_children()); current_info = build_output_columns( src.child_begin(), src.child_end(), current_info, std::back_inserter(children), base_ptr); return column_view{src.type(), size, data_ptr, bitmask_ptr, null_count, 0, std::move(children)}; }); return current_info; } /** * @brief Functor that retrieves the size of a destination buffer */ struct buf_size_functor { dst_buf_info const* ci; std::size_t operator() __device__(int index) { return ci[index].buf_size; } }; /** * @brief Functor that retrieves the split "key" for a given output * buffer index. * * The key is simply the partition index. 
*/ struct split_key_functor { int num_columns; int operator() __device__(int buf_index) { return buf_index / num_columns; } }; /** * @brief Output iterator for writing values to the dst_offset field of the * dst_buf_info struct */ struct dst_offset_output_iterator { dst_buf_info* c; using value_type = std::size_t; using difference_type = std::size_t; using pointer = std::size_t*; using reference = std::size_t&; using iterator_category = thrust::output_device_iterator_tag; dst_offset_output_iterator operator+ __host__ __device__(int i) { return {c + i}; } void operator++ __host__ __device__() { c++; } reference operator[] __device__(int i) { return dereference(c + i); } reference operator* __device__() { return dereference(c); } private: reference __device__ dereference(dst_buf_info* c) { return c->dst_offset; } }; /** * @brief Output iterator for writing values to the valid_count field of the * dst_buf_info struct */ struct dst_valid_count_output_iterator { dst_buf_info* c; using value_type = size_type; using difference_type = size_type; using pointer = size_type*; using reference = size_type&; using iterator_category = thrust::output_device_iterator_tag; dst_valid_count_output_iterator operator+ __host__ __device__(int i) { return dst_valid_count_output_iterator{c + i}; } void operator++ __host__ __device__() { c++; } reference operator[] __device__(int i) { return dereference(c + i); } reference operator* __device__() { return dereference(c); } private: reference __device__ dereference(dst_buf_info* c) { return c->valid_count; } }; /** * @brief Functor for computing size of data elements for a given cudf type. * * Note: columns types which themselves inherently have no data (strings, lists, * structs) return 0. */ struct size_of_helper { template <typename T> constexpr std::enable_if_t<not is_fixed_width<T>(), int> __device__ operator()() const { return 0; } template <typename T> constexpr std::enable_if_t<is_fixed_width<T>(), int> __device__ operator()() const noexcept { return sizeof(cudf::device_storage_type_t<T>); } }; /** * @brief Functor for returning the number of chunks an input buffer is being * subdivided into during the repartitioning step. * * Note: columns types which themselves inherently have no data (strings, lists, * structs) return 0. */ struct num_chunks_func { thrust::pair<std::size_t, std::size_t> const* chunks; __device__ std::size_t operator()(size_type i) const { return thrust::get<0>(chunks[i]); } }; void copy_data(int num_bufs, int num_src_bufs, uint8_t const** d_src_bufs, uint8_t** d_dst_bufs, dst_buf_info* _d_dst_buf_info, rmm::cuda_stream_view stream) { // Since we parallelize at one block per copy, we are vulnerable to situations where we // have small numbers of copies to do (a combination of small numbers of splits and/or columns), // so we will take the actual set of outgoing source/destination buffers and further partition // them into much smaller chunks in order to drive up the number of blocks and overall occupancy. 
auto const desired_chunk_size = std::size_t{1 * 1024 * 1024}; rmm::device_uvector<thrust::pair<std::size_t, std::size_t>> chunks(num_bufs, stream); thrust::transform( rmm::exec_policy(stream), _d_dst_buf_info, _d_dst_buf_info + num_bufs, chunks.begin(), [desired_chunk_size] __device__( dst_buf_info const& buf) -> thrust::pair<std::size_t, std::size_t> { // Total bytes for this incoming partition std::size_t const bytes = static_cast<std::size_t>(buf.num_elements) * static_cast<std::size_t>(buf.element_size); // This clause handles nested data types (e.g. list or string) that store no data in the row // columns, only in their children. if (bytes == 0) { return {1, 0}; } // The number of chunks we want to subdivide this buffer into std::size_t const num_chunks = max(std::size_t{1}, util::round_up_unsafe(bytes, desired_chunk_size) / desired_chunk_size); // NOTE: leaving chunk size as a separate parameter for future tuning // possibilities, even though in the current implementation it will be a // constant. return {num_chunks, desired_chunk_size}; }); rmm::device_uvector<offset_type> chunk_offsets(num_bufs + 1, stream); auto buf_count_iter = cudf::detail::make_counting_transform_iterator( 0, [num_bufs, num_chunks = num_chunks_func{chunks.begin()}] __device__(size_type i) { return i == num_bufs ? 0 : num_chunks(i); }); thrust::exclusive_scan(rmm::exec_policy(stream), buf_count_iter, buf_count_iter + num_bufs + 1, chunk_offsets.begin(), 0); auto out_to_in_index = [chunk_offsets = chunk_offsets.begin(), num_bufs] __device__(size_type i) { return static_cast<size_type>( thrust::upper_bound(thrust::seq, chunk_offsets, chunk_offsets + num_bufs + 1, i) - chunk_offsets) - 1; }; // apply the chunking. auto const num_chunks = cudf::detail::make_counting_transform_iterator(0, num_chunks_func{chunks.begin()}); size_type const new_buf_count = thrust::reduce(rmm::exec_policy(stream), num_chunks, num_chunks + chunks.size()); rmm::device_uvector<dst_buf_info> d_dst_buf_info(new_buf_count, stream); auto iter = thrust::make_counting_iterator(0); thrust::for_each( rmm::exec_policy(stream), iter, iter + new_buf_count, [_d_dst_buf_info, d_dst_buf_info = d_dst_buf_info.begin(), chunks = chunks.begin(), chunk_offsets = chunk_offsets.begin(), num_bufs, num_src_bufs, out_to_in_index] __device__(size_type i) { size_type const in_buf_index = out_to_in_index(i); size_type const chunk_index = i - chunk_offsets[in_buf_index]; auto const chunk_size = thrust::get<1>(chunks[in_buf_index]); dst_buf_info const& in = _d_dst_buf_info[in_buf_index]; // adjust info dst_buf_info& out = d_dst_buf_info[i]; out.element_size = in.element_size; out.value_shift = in.value_shift; out.bit_shift = in.bit_shift; out.valid_count = in.valid_count; // valid count will be set to 1 if this is a validity buffer out.src_buf_index = in.src_buf_index; out.dst_buf_index = in.dst_buf_index; size_type const elements_per_chunk = out.element_size == 0 ? 0 : chunk_size / out.element_size; out.num_elements = ((chunk_index + 1) * elements_per_chunk) > in.num_elements ? in.num_elements - (chunk_index * elements_per_chunk) : elements_per_chunk; size_type const rows_per_chunk = // if this is a validity buffer, each element is a bitmask_type, which // corresponds to 32 rows. out.valid_count > 0 ? elements_per_chunk * static_cast<size_type>(detail::size_in_bits<bitmask_type>()) : elements_per_chunk; out.num_rows = ((chunk_index + 1) * rows_per_chunk) > in.num_rows ? 
in.num_rows - (chunk_index * rows_per_chunk) : rows_per_chunk; out.src_element_index = in.src_element_index + (chunk_index * elements_per_chunk); out.dst_offset = in.dst_offset + (chunk_index * chunk_size); // out.bytes and out.buf_size are unneeded here because they are only used to // calculate real output buffer sizes. the data we are generating here is // purely intermediate for the purposes of doing more uniform copying of data // underneath the final structure of the output }); // perform the copy constexpr size_type block_size = 256; copy_partitions<block_size><<<new_buf_count, block_size, 0, stream.value()>>>( d_src_bufs, d_dst_bufs, d_dst_buf_info.data()); // postprocess valid_counts auto keys = cudf::detail::make_counting_transform_iterator( 0, [out_to_in_index] __device__(size_type i) { return out_to_in_index(i); }); auto values = thrust::make_transform_iterator( d_dst_buf_info.begin(), [] __device__(dst_buf_info const& info) { return info.valid_count; }); thrust::reduce_by_key(rmm::exec_policy(stream), keys, keys + new_buf_count, values, thrust::make_discard_iterator(), dst_valid_count_output_iterator{_d_dst_buf_info}); } }; // anonymous namespace namespace detail { std::vector<packed_table> contiguous_split(cudf::table_view const& input, std::vector<size_type> const& splits, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (input.num_columns() == 0) { return {}; } if (splits.size() > 0) { CUDF_EXPECTS(splits.back() <= input.column(0).size(), "splits can't exceed size of input columns"); } { size_type begin = 0; for (std::size_t i = 0; i < splits.size(); i++) { size_type end = splits[i]; CUDF_EXPECTS(begin >= 0, "Starting index cannot be negative."); CUDF_EXPECTS(end >= begin, "End index cannot be smaller than the starting index."); CUDF_EXPECTS(end <= input.column(0).size(), "Slice range out of bounds."); begin = end; } } std::size_t const num_partitions = splits.size() + 1; std::size_t const num_root_columns = input.num_columns(); // if inputs are empty, just return num_partitions empty tables if (input.column(0).size() == 0) { // sanitize the inputs (to handle corner cases like sliced tables) std::vector<std::unique_ptr<column>> empty_columns; empty_columns.reserve(input.num_columns()); std::transform( input.begin(), input.end(), std::back_inserter(empty_columns), [](column_view const& col) { return cudf::empty_like(col); }); std::vector<cudf::column_view> empty_column_views; empty_column_views.reserve(input.num_columns()); std::transform(empty_columns.begin(), empty_columns.end(), std::back_inserter(empty_column_views), [](std::unique_ptr<column> const& col) { return col->view(); }); table_view empty_inputs(empty_column_views); // build the empty results std::vector<packed_table> result; result.reserve(num_partitions); auto iter = thrust::make_counting_iterator(0); std::transform(iter, iter + num_partitions, std::back_inserter(result), [&empty_inputs](int partition_index) { return packed_table{ empty_inputs, packed_columns{std::make_unique<packed_columns::metadata>(pack_metadata( empty_inputs, static_cast<uint8_t const*>(nullptr), 0)), std::make_unique<rmm::device_buffer>()}}; }); return result; } // compute # of source buffers (column data, validity, children), # of partitions // and total # of buffers size_type const num_src_bufs = count_src_bufs(input.begin(), input.end()); std::size_t const num_bufs = num_src_bufs * num_partitions; // packed block of memory 1. 
split indices and src_buf_info structs std::size_t const indices_size = cudf::util::round_up_safe((num_partitions + 1) * sizeof(size_type), split_align); std::size_t const src_buf_info_size = cudf::util::round_up_safe(num_src_bufs * sizeof(src_buf_info), split_align); // host-side std::vector<uint8_t> h_indices_and_source_info(indices_size + src_buf_info_size); size_type* h_indices = reinterpret_cast<size_type*>(h_indices_and_source_info.data()); src_buf_info* h_src_buf_info = reinterpret_cast<src_buf_info*>(h_indices_and_source_info.data() + indices_size); // device-side // gpu-only : stack space needed for nested list offset calculation int const offset_stack_partition_size = compute_offset_stack_size(input.begin(), input.end()); std::size_t const offset_stack_size = offset_stack_partition_size * num_partitions * sizeof(size_type); rmm::device_buffer d_indices_and_source_info(indices_size + src_buf_info_size + offset_stack_size, stream, rmm::mr::get_current_device_resource()); auto* d_indices = reinterpret_cast<size_type*>(d_indices_and_source_info.data()); src_buf_info* d_src_buf_info = reinterpret_cast<src_buf_info*>( reinterpret_cast<uint8_t*>(d_indices_and_source_info.data()) + indices_size); size_type* d_offset_stack = reinterpret_cast<size_type*>(reinterpret_cast<uint8_t*>(d_indices_and_source_info.data()) + indices_size + src_buf_info_size); // compute splits -> indices. h_indices[0] = 0; h_indices[num_partitions] = input.column(0).size(); std::copy(splits.begin(), splits.end(), std::next(h_indices)); // setup source buf info setup_source_buf_info(input.begin(), input.end(), h_src_buf_info, h_src_buf_info); // HtoD indices and source buf info to device CUDF_CUDA_TRY(cudaMemcpyAsync(d_indices, h_indices, indices_size + src_buf_info_size, cudaMemcpyHostToDevice, stream.value())); // packed block of memory 2. partition buffer sizes and dst_buf_info structs std::size_t const buf_sizes_size = cudf::util::round_up_safe(num_partitions * sizeof(std::size_t), split_align); std::size_t const dst_buf_info_size = cudf::util::round_up_safe(num_bufs * sizeof(dst_buf_info), split_align); // host-side std::vector<uint8_t> h_buf_sizes_and_dst_info(buf_sizes_size + dst_buf_info_size); std::size_t* h_buf_sizes = reinterpret_cast<std::size_t*>(h_buf_sizes_and_dst_info.data()); dst_buf_info* h_dst_buf_info = reinterpret_cast<dst_buf_info*>(h_buf_sizes_and_dst_info.data() + buf_sizes_size); // device-side rmm::device_buffer d_buf_sizes_and_dst_info( buf_sizes_size + dst_buf_info_size, stream, rmm::mr::get_current_device_resource()); std::size_t* d_buf_sizes = reinterpret_cast<std::size_t*>(d_buf_sizes_and_dst_info.data()); dst_buf_info* d_dst_buf_info = reinterpret_cast<dst_buf_info*>( static_cast<uint8_t*>(d_buf_sizes_and_dst_info.data()) + buf_sizes_size); // compute sizes of each column in each partition, including alignment. thrust::transform( rmm::exec_policy(stream), thrust::make_counting_iterator<std::size_t>(0), thrust::make_counting_iterator<std::size_t>(num_bufs), d_dst_buf_info, [num_src_bufs, d_indices, d_src_buf_info, d_offset_stack, offset_stack_partition_size] __device__(std::size_t t) { int const split_index = t / num_src_bufs; int const src_buf_index = t % num_src_bufs; auto const& src_info = d_src_buf_info[src_buf_index]; // apply nested offsets (lists and string columns). // // We can't just use the incoming row indices to figure out where to read from in a // nested list situation. We have to apply offsets every time we cross a boundary // (list or string). 
This loop applies those offsets so that our incoming row_index_start // and row_index_end get transformed to our final values. // int const stack_pos = src_info.offset_stack_pos + (split_index * offset_stack_partition_size); size_type* offset_stack = &d_offset_stack[stack_pos]; int parent_offsets_index = src_info.parent_offsets_index; int stack_size = 0; int root_column_offset = src_info.column_offset; while (parent_offsets_index >= 0) { offset_stack[stack_size++] = parent_offsets_index; root_column_offset = d_src_buf_info[parent_offsets_index].column_offset; parent_offsets_index = d_src_buf_info[parent_offsets_index].parent_offsets_index; } // make sure to include the -column- offset on the root column in our calculation. int row_start = d_indices[split_index] + root_column_offset; int row_end = d_indices[split_index + 1] + root_column_offset; while (stack_size > 0) { stack_size--; auto const offsets = d_src_buf_info[offset_stack[stack_size]].offsets; // this case can happen when you have empty string or list columns constructed with // empty_like() if (offsets != nullptr) { row_start = offsets[row_start]; row_end = offsets[row_end]; } } // final element indices and row count int const out_element_index = src_info.is_validity ? row_start / 32 : row_start; int const num_rows = row_end - row_start; // if I am an offsets column, all my values need to be shifted int const value_shift = src_info.offsets == nullptr ? 0 : src_info.offsets[row_start]; // if I am a validity column, we may need to shift bits int const bit_shift = src_info.is_validity ? row_start % 32 : 0; // # of rows isn't necessarily the same as # of elements to be copied. auto const num_elements = [&]() { if (src_info.offsets != nullptr && num_rows > 0) { return num_rows + 1; } else if (src_info.is_validity) { return (num_rows + 31) / 32; } return num_rows; }(); int const element_size = cudf::type_dispatcher(data_type{src_info.type}, size_of_helper{}); std::size_t const bytes = static_cast<std::size_t>(num_elements) * static_cast<std::size_t>(element_size); return dst_buf_info{util::round_up_unsafe(bytes, split_align), num_elements, element_size, num_rows, out_element_index, 0, value_shift, bit_shift, src_info.is_validity ? 
1 : 0, src_buf_index, split_index}; }); // compute total size of each partition { // key is split index auto keys = cudf::detail::make_counting_transform_iterator( 0, split_key_functor{static_cast<int>(num_src_bufs)}); auto values = cudf::detail::make_counting_transform_iterator(0, buf_size_functor{d_dst_buf_info}); thrust::reduce_by_key(rmm::exec_policy(stream), keys, keys + num_bufs, values, thrust::make_discard_iterator(), d_buf_sizes); } // compute start offset for each output buffer { auto keys = cudf::detail::make_counting_transform_iterator( 0, split_key_functor{static_cast<int>(num_src_bufs)}); auto values = cudf::detail::make_counting_transform_iterator(0, buf_size_functor{d_dst_buf_info}); thrust::exclusive_scan_by_key(rmm::exec_policy(stream), keys, keys + num_bufs, values, dst_offset_output_iterator{d_dst_buf_info}, std::size_t{0}); } // DtoH buf sizes and col info back to the host CUDF_CUDA_TRY(cudaMemcpyAsync(h_buf_sizes, d_buf_sizes, buf_sizes_size + dst_buf_info_size, cudaMemcpyDeviceToHost, stream.value())); stream.synchronize(); // allocate output partition buffers std::vector<rmm::device_buffer> out_buffers; out_buffers.reserve(num_partitions); std::transform(h_buf_sizes, h_buf_sizes + num_partitions, std::back_inserter(out_buffers), [stream, mr](std::size_t bytes) { return rmm::device_buffer{bytes, stream, mr}; }); // packed block of memory 3. pointers to source and destination buffers (and stack space on the // gpu for offset computation) std::size_t const src_bufs_size = cudf::util::round_up_safe(num_src_bufs * sizeof(uint8_t*), split_align); std::size_t const dst_bufs_size = cudf::util::round_up_safe(num_partitions * sizeof(uint8_t*), split_align); // host-side std::vector<uint8_t> h_src_and_dst_buffers(src_bufs_size + dst_bufs_size); uint8_t const** h_src_bufs = reinterpret_cast<uint8_t const**>(h_src_and_dst_buffers.data()); uint8_t** h_dst_bufs = reinterpret_cast<uint8_t**>(h_src_and_dst_buffers.data() + src_bufs_size); // device-side rmm::device_buffer d_src_and_dst_buffers(src_bufs_size + dst_bufs_size + offset_stack_size, stream, rmm::mr::get_current_device_resource()); auto const** d_src_bufs = reinterpret_cast<uint8_t const**>(d_src_and_dst_buffers.data()); uint8_t** d_dst_bufs = reinterpret_cast<uint8_t**>( reinterpret_cast<uint8_t*>(d_src_and_dst_buffers.data()) + src_bufs_size); // setup src buffers setup_src_buf_data(input.begin(), input.end(), h_src_bufs); // setup dst buffers std::transform(out_buffers.begin(), out_buffers.end(), h_dst_bufs, [](auto& buf) { return static_cast<uint8_t*>(buf.data()); }); // HtoD src and dest buffers CUDF_CUDA_TRY(cudaMemcpyAsync( d_src_bufs, h_src_bufs, src_bufs_size + dst_bufs_size, cudaMemcpyHostToDevice, stream.value())); // perform the copy. copy_data(num_bufs, num_src_bufs, d_src_bufs, d_dst_bufs, d_dst_buf_info, stream); // DtoH dst info (to retrieve null counts) CUDF_CUDA_TRY(cudaMemcpyAsync( h_dst_buf_info, d_dst_buf_info, dst_buf_info_size, cudaMemcpyDeviceToHost, stream.value())); stream.synchronize(); // build the output. std::vector<packed_table> result; result.reserve(num_partitions); std::vector<column_view> cols; cols.reserve(num_root_columns); auto cur_dst_buf_info = h_dst_buf_info; for (std::size_t idx = 0; idx < num_partitions; idx++) { // traverse the buffers and build the columns. 
cur_dst_buf_info = build_output_columns( input.begin(), input.end(), cur_dst_buf_info, std::back_inserter(cols), h_dst_bufs[idx]); // pack the columns cudf::table_view t{cols}; result.push_back(packed_table{ t, packed_columns{ std::make_unique<packed_columns::metadata>(cudf::pack_metadata( t, reinterpret_cast<uint8_t const*>(out_buffers[idx].data()), out_buffers[idx].size())), std::make_unique<rmm::device_buffer>(std::move(out_buffers[idx]))}}); cols.clear(); } return result; } }; // namespace detail std::vector<packed_table> contiguous_split(cudf::table_view const& input, std::vector<size_type> const& splits, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::contiguous_split(input, splits, cudf::get_default_stream(), mr); } }; // namespace cudf
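// ---------------------------------------------------------------------------
// Editor's note: the sketch below is NOT part of the original file. It is a
// small host-side analog of the size/offset bookkeeping performed above,
// showing how per-buffer byte counts rounded up to split_align turn into
// per-partition allocation sizes and per-buffer destination offsets via a
// reduce-by-key / exclusive-scan-by-key with key = buf_index / num_src_bufs.
// All names below (round_up_to, offset_layout_sketch, ...) are illustrative.
#include <cstddef>
#include <vector>

inline std::size_t round_up_to(std::size_t v, std::size_t align)
{
  // same arithmetic effect as util::round_up_unsafe used in the kernel above
  return ((v + align - 1) / align) * align;
}

inline void offset_layout_sketch()
{
  constexpr std::size_t align      = 64;  // mirrors split_align
  constexpr int num_src_bufs_local = 3;   // e.g. data + validity + offsets

  // raw byte counts for 2 partitions x 3 source buffers, partition-major order
  std::vector<std::size_t> buf_sizes{100, 40, 8, 260, 40, 8};

  std::vector<std::size_t> partition_totals(buf_sizes.size() / num_src_bufs_local, 0);
  std::vector<std::size_t> dst_offsets(buf_sizes.size(), 0);

  for (std::size_t i = 0; i < buf_sizes.size(); ++i) {
    std::size_t const padded    = round_up_to(buf_sizes[i], align);
    std::size_t const partition = i / num_src_bufs_local;        // the "split key"
    dst_offsets[i]              = partition_totals[partition];   // exclusive scan by key
    partition_totals[partition] += padded;                       // reduce by key
  }
  // partition_totals -> one rmm::device_buffer allocation per output partition
  // dst_offsets      -> the dst_buf_info::dst_offset of each copied buffer
  // e.g. partition 0: sizes 128/64/64 -> offsets 0/128/192, total 256 bytes
}
// ---------------------------------------------------------------------------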
cd357a8471a2de72261c2b244357e17fdc9c260c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2017 by Contributors * \file roi_align.cu * \brief roi align operator * \author Yuchen Guo, Zehao Shi */ #include "./roi_align-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> namespace mshadow { namespace cuda { template<typename Dtype> __global__ void ROIAlignForwardKernel(const int count, const Dtype* bottom_data, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, Dtype* argmax_data) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; if (roi_batch_ind < 0) { top_data[index] = 0; argmax_data[index] = 0; continue; } Dtype roi_start_w = (bottom_rois[1]) * spatial_scale; Dtype roi_start_h = (bottom_rois[2]) * spatial_scale; Dtype roi_end_w = (bottom_rois[3]) * spatial_scale; Dtype roi_end_h = (bottom_rois[4]) * spatial_scale; // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, static_cast<Dtype>(1)); Dtype roi_height = max(roi_end_h - roi_start_h, static_cast<Dtype>(1)); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); Dtype hstart = static_cast<Dtype>((ph) * bin_size_h); Dtype wstart = static_cast<Dtype>((pw) * bin_size_w); Dtype hend = static_cast<Dtype>((ph + 1) * bin_size_h); Dtype wend = static_cast<Dtype>((pw + 1) * bin_size_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); hend = min(max(hend + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); wstart = min(max(wstart + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); wend = min(max(wend + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; int bottom_index = 0; bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype h_stride = (hend - hstart)/3.0; Dtype w_stride = (wend - wstart)/3.0; for (Dtype h = hstart+h_stride; h <= hend-h_stride+0.01; h += max(h_stride, 0.01)) { for (Dtype w = wstart+w_stride; w <= wend-w_stride+0.01; w += max(w_stride, 0.01)) { bottom_index ++; int hlow = min(max(static_cast<int>(floor(h)), 0), height-1); int hhigh = hlow + 1; int wleft = min(max(static_cast<int>(floor(w)), 0), width-1); int wright = wleft + 1; int topleft = hlow * width + wleft; int topright = hlow * width + wright; int bottomleft = hhigh * width + wleft; int bottomright = hhigh * width + wright; Dtype alpha = (hlow == hhigh) ? static_cast<Dtype>(0.5) : (h - hlow) / (hhigh - hlow); Dtype beta = (wleft == wright) ? 
static_cast<Dtype>(0.5) : (w - wleft) / (wright - wleft); Dtype value = (1 - alpha) * (1 - beta) * bottom_data[topleft] + alpha * (1 - beta) * bottom_data[bottomleft] + (1 - alpha) * beta * bottom_data[topright] + alpha * beta * bottom_data[bottomright]; if (value > maxval) { maxval = value; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = (Dtype)maxidx; } } template<typename Dtype> inline void ROIAlignForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { const Dtype *bottom_data = data.dptr_; const Dtype *bottom_rois = bbox.dptr_; Dtype *top_data = out.dptr_; Dtype *argmax_data = max_idx.dptr_; const int count = out.shape_.Size(); const int channels = data.size(1); const int height = data.size(2); const int width = data.size(3); const int pooled_height = out.size(2); const int pooled_width = out.size(3); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward"); hipStream_t stream = Stream<gpu>::GetStream(out.stream_); hipLaunchKernelGGL(( ROIAlignForwardKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream, count, bottom_data, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_rois, top_data, argmax_data); } template<typename Dtype> __global__ void ROIAlignBackwardAccKernel(const int count, const Dtype* top_diff, const Dtype* argmax_data, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } Dtype roi_start_w = (offset_bottom_rois[1]) * spatial_scale; Dtype roi_start_h = (offset_bottom_rois[2]) * spatial_scale; Dtype roi_end_w = (offset_bottom_rois[3]) * spatial_scale; Dtype roi_end_h = (offset_bottom_rois[4]) * spatial_scale; // Skip if ROI doesn't include (h, w) const bool in_roi = (w > roi_start_w - 1.0 && w < roi_end_w + 1.0 && h > roi_start_h - 1.0 && h < roi_end_h + 1.0); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const Dtype* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, static_cast<Dtype>(1)); Dtype roi_height = max(roi_end_h - roi_start_h, static_cast<Dtype>(1)); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); for (int ph = 0; ph < pooled_height; ++ph) { for (int pw = 
0; pw < pooled_width; ++pw) { Dtype hstart = static_cast<Dtype>((ph) * bin_size_h); Dtype wstart = static_cast<Dtype>((pw) * bin_size_w); Dtype hend = static_cast<Dtype>((ph + 1) * bin_size_h); Dtype wend = static_cast<Dtype>((pw + 1) * bin_size_w); hstart = min(max(hstart + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); hend = min(max(hend + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); wstart = min(max(wstart + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); wend = min(max(wend + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); bool in_bin = (w > wstart - 1.0 && w < wend + 1.0 && h > hstart - 1.0 && h < hend + 1.0); if (!in_bin) { continue; } const int pool_index = ph * pooled_width + pw; int bottom_index = 0; Dtype h_stride = (hend - hstart)/3.0; Dtype w_stride = (wend - wstart)/3.0; for (Dtype rh = hstart+h_stride; rh <= hend-h_stride+0.01; rh += max(h_stride, 0.01)) { for (Dtype rw = wstart+w_stride; rw <= wend-w_stride+0.01; rw += max(w_stride, 0.01)) { bottom_index ++; if (offset_argmax_data[pool_index] != bottom_index) continue; // compute the integer coordinates around (h, w) for bilinear interpolation int hlow = min(max(static_cast<int>(floor(rh)), 0), height-1); int hhigh = hlow + 1; int wleft = min(max(static_cast<int>(floor(rw)), 0), width-1); int wright = wleft + 1; if (h != hlow && h != hhigh && w != wleft && w != wright) // (w, h) is not around (rw, rh) continue; Dtype alpha = (hlow == hhigh) ? static_cast<Dtype>(0.5) : (rh - hlow) / (hhigh - hlow); Dtype beta = (wleft == wright) ? static_cast<Dtype>(0.5) : (rw - wleft) / (wright - wleft); if (h == hlow && w == wleft) gradient += offset_top_diff[pool_index] * (1 - alpha) * (1 - beta); else if (h == hlow && w == wright) gradient += offset_top_diff[pool_index] * (1 - alpha) * beta; else if (h == hhigh && w == wleft) gradient += offset_top_diff[pool_index] * alpha * (1 - beta); else if (h == hhigh && w == wright) gradient += offset_top_diff[pool_index] * alpha * beta; } } } } } bottom_diff[index] += gradient; } } template<typename Dtype> inline void ROIAlignBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad, const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { const Dtype *top_diff = out_grad.dptr_; const Dtype *bottom_rois = bbox.dptr_; Dtype *bottom_diff = in_grad.dptr_; Dtype *argmax_data = max_idx.dptr_; const int count = in_grad.shape_.Size(); const int num_rois = bbox.size(0); const int channels = in_grad.size(1); const int height = in_grad.size(2); const int width = in_grad.size(3); const int pooled_height = out_grad.size(2); const int pooled_width = out_grad.size(3); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward"); hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_); hipLaunchKernelGGL(( ROIAlignBackwardAccKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream, count, top_diff, argmax_data, num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_diff, bottom_rois); } } // namespace cuda template<typename Dtype> inline void ROIAlignForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { 
cuda::ROIAlignForward(out, data, bbox, max_idx, spatial_scale); } template<typename Dtype> inline void ROIAlignBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad, const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { cuda::ROIAlignBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(ROIAlignParam param, int dtype) { Operator* op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new ROIAlignOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
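The forward kernel above pools each output cell by bilinearly sampling a fixed 3x3 grid of points inside the bin and keeping the maximum, recording a 1-based sample index so the backward pass can replay the same sampling order. The CPU reference for a single bin below is an illustrative sketch, not part of the MXNet operator; clamping the upper interpolation corners to stay in bounds is an added safety assumption.

// Hypothetical CPU reference for one pooled bin of the ROIAlign forward pass.
// `bottom` points at one (height x width) feature-map channel; hstart/hend and
// wstart/wend are the clipped bin bounds already offset by the ROI origin.
#include <algorithm>
#include <cfloat>
#include <cmath>

void roi_align_bin_reference(const float* bottom, int height, int width,
                             float hstart, float hend, float wstart, float wend,
                             float* maxval_out, int* maxidx_out) {
  const bool is_empty = (hend <= hstart) || (wend <= wstart);
  float maxval = is_empty ? 0.f : -FLT_MAX;  // empty bins pool to zero
  int maxidx = -1;                           // -1: nothing pooled, nothing backpropagated
  int sample = 0;                            // 1-based sample counter, same order as the kernel
  const float h_stride = (hend - hstart) / 3.0f;
  const float w_stride = (wend - wstart) / 3.0f;
  if (!is_empty) {
    for (float h = hstart + h_stride; h <= hend - h_stride + 0.01f;
         h += std::max(h_stride, 0.01f)) {
      for (float w = wstart + w_stride; w <= wend - w_stride + 0.01f;
           w += std::max(w_stride, 0.01f)) {
        ++sample;
        // Bilinear interpolation at (h, w); corners are clamped so this
        // reference never reads outside the channel.
        int hlow = std::min(std::max(static_cast<int>(std::floor(h)), 0), height - 1);
        int wleft = std::min(std::max(static_cast<int>(std::floor(w)), 0), width - 1);
        int hhigh = std::min(hlow + 1, height - 1);
        int wright = std::min(wleft + 1, width - 1);
        float alpha = (hlow == hhigh) ? 0.5f : (h - hlow);
        float beta = (wleft == wright) ? 0.5f : (w - wleft);
        float value = (1 - alpha) * (1 - beta) * bottom[hlow * width + wleft]
                    + alpha * (1 - beta) * bottom[hhigh * width + wleft]
                    + (1 - alpha) * beta * bottom[hlow * width + wright]
                    + alpha * beta * bottom[hhigh * width + wright];
        if (value > maxval) { maxval = value; maxidx = sample; }
      }
    }
  }
  *maxval_out = maxval;
  *maxidx_out = maxidx;
}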
cd357a8471a2de72261c2b244357e17fdc9c260c.cu
/*! * Copyright (c) 2017 by Contributors * \file roi_align.cu * \brief roi align operator * \author Yuchen Guo, Zehao Shi */ #include "./roi_align-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> namespace mshadow { namespace cuda { template<typename Dtype> __global__ void ROIAlignForwardKernel(const int count, const Dtype* bottom_data, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, Dtype* argmax_data) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; if (roi_batch_ind < 0) { top_data[index] = 0; argmax_data[index] = 0; continue; } Dtype roi_start_w = (bottom_rois[1]) * spatial_scale; Dtype roi_start_h = (bottom_rois[2]) * spatial_scale; Dtype roi_end_w = (bottom_rois[3]) * spatial_scale; Dtype roi_end_h = (bottom_rois[4]) * spatial_scale; // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, static_cast<Dtype>(1)); Dtype roi_height = max(roi_end_h - roi_start_h, static_cast<Dtype>(1)); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); Dtype hstart = static_cast<Dtype>((ph) * bin_size_h); Dtype wstart = static_cast<Dtype>((pw) * bin_size_w); Dtype hend = static_cast<Dtype>((ph + 1) * bin_size_h); Dtype wend = static_cast<Dtype>((pw + 1) * bin_size_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); hend = min(max(hend + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); wstart = min(max(wstart + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); wend = min(max(wend + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; int bottom_index = 0; bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype h_stride = (hend - hstart)/3.0; Dtype w_stride = (wend - wstart)/3.0; for (Dtype h = hstart+h_stride; h <= hend-h_stride+0.01; h += max(h_stride, 0.01)) { for (Dtype w = wstart+w_stride; w <= wend-w_stride+0.01; w += max(w_stride, 0.01)) { bottom_index ++; int hlow = min(max(static_cast<int>(floor(h)), 0), height-1); int hhigh = hlow + 1; int wleft = min(max(static_cast<int>(floor(w)), 0), width-1); int wright = wleft + 1; int topleft = hlow * width + wleft; int topright = hlow * width + wright; int bottomleft = hhigh * width + wleft; int bottomright = hhigh * width + wright; Dtype alpha = (hlow == hhigh) ? static_cast<Dtype>(0.5) : (h - hlow) / (hhigh - hlow); Dtype beta = (wleft == wright) ? 
static_cast<Dtype>(0.5) : (w - wleft) / (wright - wleft); Dtype value = (1 - alpha) * (1 - beta) * bottom_data[topleft] + alpha * (1 - beta) * bottom_data[bottomleft] + (1 - alpha) * beta * bottom_data[topright] + alpha * beta * bottom_data[bottomright]; if (value > maxval) { maxval = value; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = (Dtype)maxidx; } } template<typename Dtype> inline void ROIAlignForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { const Dtype *bottom_data = data.dptr_; const Dtype *bottom_rois = bbox.dptr_; Dtype *top_data = out.dptr_; Dtype *argmax_data = max_idx.dptr_; const int count = out.shape_.Size(); const int channels = data.size(1); const int height = data.size(2); const int width = data.size(3); const int pooled_height = out.size(2); const int pooled_width = out.size(3); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward"); cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); ROIAlignForwardKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>( count, bottom_data, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_rois, top_data, argmax_data); } template<typename Dtype> __global__ void ROIAlignBackwardAccKernel(const int count, const Dtype* top_diff, const Dtype* argmax_data, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } Dtype roi_start_w = (offset_bottom_rois[1]) * spatial_scale; Dtype roi_start_h = (offset_bottom_rois[2]) * spatial_scale; Dtype roi_end_w = (offset_bottom_rois[3]) * spatial_scale; Dtype roi_end_h = (offset_bottom_rois[4]) * spatial_scale; // Skip if ROI doesn't include (h, w) const bool in_roi = (w > roi_start_w - 1.0 && w < roi_end_w + 1.0 && h > roi_start_h - 1.0 && h < roi_end_h + 1.0); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const Dtype* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, static_cast<Dtype>(1)); Dtype roi_height = max(roi_end_h - roi_start_h, static_cast<Dtype>(1)); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); for (int ph = 0; ph < pooled_height; ++ph) { for (int pw = 0; pw < pooled_width; ++pw) 
{ Dtype hstart = static_cast<Dtype>((ph) * bin_size_h); Dtype wstart = static_cast<Dtype>((pw) * bin_size_w); Dtype hend = static_cast<Dtype>((ph + 1) * bin_size_h); Dtype wend = static_cast<Dtype>((pw + 1) * bin_size_w); hstart = min(max(hstart + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); hend = min(max(hend + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); wstart = min(max(wstart + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); wend = min(max(wend + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); bool in_bin = (w > wstart - 1.0 && w < wend + 1.0 && h > hstart - 1.0 && h < hend + 1.0); if (!in_bin) { continue; } const int pool_index = ph * pooled_width + pw; int bottom_index = 0; Dtype h_stride = (hend - hstart)/3.0; Dtype w_stride = (wend - wstart)/3.0; for (Dtype rh = hstart+h_stride; rh <= hend-h_stride+0.01; rh += max(h_stride, 0.01)) { for (Dtype rw = wstart+w_stride; rw <= wend-w_stride+0.01; rw += max(w_stride, 0.01)) { bottom_index ++; if (offset_argmax_data[pool_index] != bottom_index) continue; // compute the integer coordinates around (h, w) for bilinear interpolation int hlow = min(max(static_cast<int>(floor(rh)), 0), height-1); int hhigh = hlow + 1; int wleft = min(max(static_cast<int>(floor(rw)), 0), width-1); int wright = wleft + 1; if (h != hlow && h != hhigh && w != wleft && w != wright) // (w, h) is not around (rw, rh) continue; Dtype alpha = (hlow == hhigh) ? static_cast<Dtype>(0.5) : (rh - hlow) / (hhigh - hlow); Dtype beta = (wleft == wright) ? static_cast<Dtype>(0.5) : (rw - wleft) / (wright - wleft); if (h == hlow && w == wleft) gradient += offset_top_diff[pool_index] * (1 - alpha) * (1 - beta); else if (h == hlow && w == wright) gradient += offset_top_diff[pool_index] * (1 - alpha) * beta; else if (h == hhigh && w == wleft) gradient += offset_top_diff[pool_index] * alpha * (1 - beta); else if (h == hhigh && w == wright) gradient += offset_top_diff[pool_index] * alpha * beta; } } } } } bottom_diff[index] += gradient; } } template<typename Dtype> inline void ROIAlignBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad, const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { const Dtype *top_diff = out_grad.dptr_; const Dtype *bottom_rois = bbox.dptr_; Dtype *bottom_diff = in_grad.dptr_; Dtype *argmax_data = max_idx.dptr_; const int count = in_grad.shape_.Size(); const int num_rois = bbox.size(0); const int channels = in_grad.size(1); const int height = in_grad.size(2); const int width = in_grad.size(3); const int pooled_height = out_grad.size(2); const int pooled_width = out_grad.size(3); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward"); cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_); ROIAlignBackwardAccKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>( count, top_diff, argmax_data, num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_diff, bottom_rois); } } // namespace cuda template<typename Dtype> inline void ROIAlignForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { cuda::ROIAlignForward(out, data, bbox, max_idx, spatial_scale); } 
template<typename Dtype> inline void ROIAlignBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad, const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { cuda::ROIAlignBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(ROIAlignParam param, int dtype) { Operator* op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new ROIAlignOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
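Both launchers flatten the output (or input) tensor into a single element count and spread it over a 2D grid capped at kMaxGridNum blocks per dimension, which is why the kernels iterate with a stride of blockDim.x * gridDim.x * gridDim.y. A small sketch of that arithmetic follows; the helper and the default limits (1024 threads, 65535 blocks, mshadow's usual values) are assumptions made here for illustration.

// Sketch of the grid-shape computation used by ROIAlignForward/BackwardAcc.
#include <cstdio>

struct LaunchShape { int gx, gy, block; };

LaunchShape roi_align_launch_shape(int count,
                                   int kMaxThreadsPerBlock = 1024,
                                   int kMaxGridNum = 65535) {
  int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
  LaunchShape s;
  s.gx = kMaxGridNum;                                 // dimGrid.x
  s.gy = (gridSize + kMaxGridNum - 1) / kMaxGridNum;  // dimGrid.y
  s.block = kMaxThreadsPerBlock;
  return s;
}

int main() {
  // e.g. 128 ROIs x 256 channels x 7 x 7 pooled outputs
  int count = 128 * 256 * 7 * 7;
  LaunchShape s = roi_align_launch_shape(count);
  // Each thread then visits index, index + block * gx * gy, index + 2 * block * gx * gy, ...
  // which is exactly the stride used in the kernels' grid-stride loops.
  std::printf("grid = (%d, %d), block = %d\n", s.gx, s.gy, s.block);
  return 0;
}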
941b42a2a280c21f3043a3e2d82f91fef75514bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2019, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/preconditioner/jacobi_kernels.hpp" #include <ginkgo/core/base/exception_helpers.hpp> #include "core/base/extended_float.hpp" #include "core/matrix/dense_kernels.hpp" #include "core/preconditioner/jacobi_utils.hpp" #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/thread_ids.cuh" #include "cuda/components/warp_blas.cuh" #include "cuda/preconditioner/jacobi_common.hpp" namespace gko { namespace kernels { namespace cuda { /** * @brief The Jacobi preconditioner namespace. 
* @ref Jacobi * @ingroup jacobi */ namespace jacobi { namespace kernel { template <int max_block_size, int subwarp_size, int warps_per_block, typename ValueType, typename IndexType> __global__ void __launch_bounds__(warps_per_block *cuda_config::warp_size) advanced_apply(const ValueType *__restrict__ blocks, preconditioner::block_interleaved_storage_scheme<IndexType> storage_scheme, const IndexType *__restrict__ block_ptrs, size_type num_blocks, const ValueType *__restrict__ alpha, const ValueType *__restrict__ b, int32 b_stride, ValueType *__restrict__ x, int32 x_stride) { const auto block_id = thread::get_subwarp_id<subwarp_size, warps_per_block>(); const auto subwarp = group::tiled_partition<subwarp_size>(group::this_thread_block()); if (block_id >= num_blocks) { return; } const auto block_size = block_ptrs[block_id + 1] - block_ptrs[block_id]; ValueType v = zero<ValueType>(); if (subwarp.thread_rank() < block_size) { v = alpha[0] * b[(block_ptrs[block_id] + subwarp.thread_rank()) * b_stride]; } multiply_vec<max_block_size>( subwarp, block_size, v, blocks + storage_scheme.get_global_block_offset(block_id) + subwarp.thread_rank(), storage_scheme.get_stride(), x + block_ptrs[block_id] * x_stride, x_stride, [](ValueType &result, const ValueType &out) { result += out; }); } template <int max_block_size, int subwarp_size, int warps_per_block, typename ValueType, typename IndexType> __global__ void __launch_bounds__(warps_per_block *cuda_config::warp_size) advanced_adaptive_apply( const ValueType *__restrict__ blocks, preconditioner::block_interleaved_storage_scheme<IndexType> storage_scheme, const precision_reduction *__restrict__ block_precisions, const IndexType *__restrict__ block_ptrs, size_type num_blocks, const ValueType *__restrict__ alpha, const ValueType *__restrict__ b, int32 b_stride, ValueType *__restrict__ x, int32 x_stride) { const auto block_id = thread::get_subwarp_id<subwarp_size, warps_per_block>(); const auto subwarp = group::tiled_partition<subwarp_size>(group::this_thread_block()); if (block_id >= num_blocks) { return; } const auto block_size = block_ptrs[block_id + 1] - block_ptrs[block_id]; auto alpha_val = alpha == nullptr ? 
one<ValueType>() : alpha[0]; ValueType v = zero<ValueType>(); if (subwarp.thread_rank() < block_size) { v = alpha[0] * b[(block_ptrs[block_id] + subwarp.thread_rank()) * b_stride]; } GKO_PRECONDITIONER_JACOBI_RESOLVE_PRECISION( ValueType, block_precisions[block_id], multiply_vec<max_block_size>( subwarp, block_size, v, reinterpret_cast<const resolved_precision *>( blocks + storage_scheme.get_group_offset(block_id)) + storage_scheme.get_block_offset(block_id) + subwarp.thread_rank(), storage_scheme.get_stride(), x + block_ptrs[block_id] * x_stride, x_stride, [](ValueType &result, const ValueType &out) { result += out; })); } } // namespace kernel namespace { template <int warps_per_block, int max_block_size, typename ValueType, typename IndexType> void advanced_apply( syn::value_list<int, max_block_size>, size_type num_blocks, const precision_reduction *block_precisions, const IndexType *block_pointers, const ValueType *blocks, const preconditioner::block_interleaved_storage_scheme<IndexType> &storage_scheme, const ValueType *alpha, const ValueType *b, size_type b_stride, ValueType *x, size_type x_stride) { constexpr int subwarp_size = get_larger_power(max_block_size); constexpr int blocks_per_warp = cuda_config::warp_size / subwarp_size; const dim3 grid_size(ceildiv(num_blocks, warps_per_block * blocks_per_warp), 1, 1); const dim3 block_size(subwarp_size, blocks_per_warp, warps_per_block); if (block_precisions) { hipLaunchKernelGGL(( kernel::advanced_adaptive_apply<max_block_size, subwarp_size, warps_per_block>) , dim3(grid_size), dim3(block_size), 0, 0, as_cuda_type(blocks), storage_scheme, block_precisions, block_pointers, num_blocks, as_cuda_type(alpha), as_cuda_type(b), b_stride, as_cuda_type(x), x_stride); } else { hipLaunchKernelGGL(( kernel::advanced_apply<max_block_size, subwarp_size, warps_per_block>) , dim3(grid_size), dim3(block_size), 0, 0, as_cuda_type(blocks), storage_scheme, block_pointers, num_blocks, as_cuda_type(alpha), as_cuda_type(b), b_stride, as_cuda_type(x), x_stride); } } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_advanced_apply, advanced_apply); } // namespace template <typename ValueType, typename IndexType> void apply(std::shared_ptr<const CudaExecutor> exec, size_type num_blocks, uint32 max_block_size, const preconditioner::block_interleaved_storage_scheme<IndexType> &storage_scheme, const Array<precision_reduction> &block_precisions, const Array<IndexType> &block_pointers, const Array<ValueType> &blocks, const matrix::Dense<ValueType> *alpha, const matrix::Dense<ValueType> *b, const matrix::Dense<ValueType> *beta, matrix::Dense<ValueType> *x) { // TODO: write a special kernel for multiple RHS dense::scale(exec, beta, x); for (size_type col = 0; col < b->get_size()[1]; ++col) { select_advanced_apply( compiled_kernels(), [&](int compiled_block_size) { return max_block_size <= compiled_block_size; }, syn::value_list<int, cuda_config::min_warps_per_block>(), syn::type_list<>(), num_blocks, block_precisions.get_const_data(), block_pointers.get_const_data(), blocks.get_const_data(), storage_scheme, alpha->get_const_values(), b->get_const_values() + col, b->get_stride(), x->get_values() + col, x->get_stride()); } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(GKO_DECLARE_JACOBI_APPLY_KERNEL); } // namespace jacobi } // namespace cuda } // namespace kernels } // namespace gko
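The apply entry point above first scales x by beta and then, one right-hand-side column at a time, launches a kernel that adds alpha times the inverted diagonal block applied to the matching slice of b, with the adaptive variant re-reading each block in its stored reduced precision. A plain CPU sketch of that block-Jacobi update follows; the flat row-major block storage is an assumption made for readability, since Ginkgo's real storage_scheme interleaves blocks.

// Illustrative CPU reference for the "advanced" block-Jacobi apply,
// x = beta * x + alpha * D^{-1} * b, for one right-hand side.
#include <cstddef>
#include <vector>

void block_jacobi_apply_reference(const std::vector<std::size_t>& block_ptrs,
                                  const std::vector<std::vector<double>>& inv_blocks,
                                  double alpha, const std::vector<double>& b,
                                  double beta, std::vector<double>& x) {
  for (double& xi : x) xi *= beta;                    // x = beta * x
  for (std::size_t bl = 0; bl + 1 < block_ptrs.size(); ++bl) {
    std::size_t begin = block_ptrs[bl];
    std::size_t size = block_ptrs[bl + 1] - begin;
    const std::vector<double>& inv = inv_blocks[bl];  // size x size, row-major inverse
    for (std::size_t r = 0; r < size; ++r) {
      double acc = 0.0;
      for (std::size_t c = 0; c < size; ++c) {
        acc += inv[r * size + c] * b[begin + c];
      }
      x[begin + r] += alpha * acc;                    // x += alpha * D^{-1} b
    }
  }
}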
941b42a2a280c21f3043a3e2d82f91fef75514bb.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2019, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/preconditioner/jacobi_kernels.hpp" #include <ginkgo/core/base/exception_helpers.hpp> #include "core/base/extended_float.hpp" #include "core/matrix/dense_kernels.hpp" #include "core/preconditioner/jacobi_utils.hpp" #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/thread_ids.cuh" #include "cuda/components/warp_blas.cuh" #include "cuda/preconditioner/jacobi_common.hpp" namespace gko { namespace kernels { namespace cuda { /** * @brief The Jacobi preconditioner namespace. 
* @ref Jacobi * @ingroup jacobi */ namespace jacobi { namespace kernel { template <int max_block_size, int subwarp_size, int warps_per_block, typename ValueType, typename IndexType> __global__ void __launch_bounds__(warps_per_block *cuda_config::warp_size) advanced_apply(const ValueType *__restrict__ blocks, preconditioner::block_interleaved_storage_scheme<IndexType> storage_scheme, const IndexType *__restrict__ block_ptrs, size_type num_blocks, const ValueType *__restrict__ alpha, const ValueType *__restrict__ b, int32 b_stride, ValueType *__restrict__ x, int32 x_stride) { const auto block_id = thread::get_subwarp_id<subwarp_size, warps_per_block>(); const auto subwarp = group::tiled_partition<subwarp_size>(group::this_thread_block()); if (block_id >= num_blocks) { return; } const auto block_size = block_ptrs[block_id + 1] - block_ptrs[block_id]; ValueType v = zero<ValueType>(); if (subwarp.thread_rank() < block_size) { v = alpha[0] * b[(block_ptrs[block_id] + subwarp.thread_rank()) * b_stride]; } multiply_vec<max_block_size>( subwarp, block_size, v, blocks + storage_scheme.get_global_block_offset(block_id) + subwarp.thread_rank(), storage_scheme.get_stride(), x + block_ptrs[block_id] * x_stride, x_stride, [](ValueType &result, const ValueType &out) { result += out; }); } template <int max_block_size, int subwarp_size, int warps_per_block, typename ValueType, typename IndexType> __global__ void __launch_bounds__(warps_per_block *cuda_config::warp_size) advanced_adaptive_apply( const ValueType *__restrict__ blocks, preconditioner::block_interleaved_storage_scheme<IndexType> storage_scheme, const precision_reduction *__restrict__ block_precisions, const IndexType *__restrict__ block_ptrs, size_type num_blocks, const ValueType *__restrict__ alpha, const ValueType *__restrict__ b, int32 b_stride, ValueType *__restrict__ x, int32 x_stride) { const auto block_id = thread::get_subwarp_id<subwarp_size, warps_per_block>(); const auto subwarp = group::tiled_partition<subwarp_size>(group::this_thread_block()); if (block_id >= num_blocks) { return; } const auto block_size = block_ptrs[block_id + 1] - block_ptrs[block_id]; auto alpha_val = alpha == nullptr ? 
one<ValueType>() : alpha[0]; ValueType v = zero<ValueType>(); if (subwarp.thread_rank() < block_size) { v = alpha[0] * b[(block_ptrs[block_id] + subwarp.thread_rank()) * b_stride]; } GKO_PRECONDITIONER_JACOBI_RESOLVE_PRECISION( ValueType, block_precisions[block_id], multiply_vec<max_block_size>( subwarp, block_size, v, reinterpret_cast<const resolved_precision *>( blocks + storage_scheme.get_group_offset(block_id)) + storage_scheme.get_block_offset(block_id) + subwarp.thread_rank(), storage_scheme.get_stride(), x + block_ptrs[block_id] * x_stride, x_stride, [](ValueType &result, const ValueType &out) { result += out; })); } } // namespace kernel namespace { template <int warps_per_block, int max_block_size, typename ValueType, typename IndexType> void advanced_apply( syn::value_list<int, max_block_size>, size_type num_blocks, const precision_reduction *block_precisions, const IndexType *block_pointers, const ValueType *blocks, const preconditioner::block_interleaved_storage_scheme<IndexType> &storage_scheme, const ValueType *alpha, const ValueType *b, size_type b_stride, ValueType *x, size_type x_stride) { constexpr int subwarp_size = get_larger_power(max_block_size); constexpr int blocks_per_warp = cuda_config::warp_size / subwarp_size; const dim3 grid_size(ceildiv(num_blocks, warps_per_block * blocks_per_warp), 1, 1); const dim3 block_size(subwarp_size, blocks_per_warp, warps_per_block); if (block_precisions) { kernel::advanced_adaptive_apply<max_block_size, subwarp_size, warps_per_block> <<<grid_size, block_size, 0, 0>>>( as_cuda_type(blocks), storage_scheme, block_precisions, block_pointers, num_blocks, as_cuda_type(alpha), as_cuda_type(b), b_stride, as_cuda_type(x), x_stride); } else { kernel::advanced_apply<max_block_size, subwarp_size, warps_per_block> <<<grid_size, block_size, 0, 0>>>( as_cuda_type(blocks), storage_scheme, block_pointers, num_blocks, as_cuda_type(alpha), as_cuda_type(b), b_stride, as_cuda_type(x), x_stride); } } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_advanced_apply, advanced_apply); } // namespace template <typename ValueType, typename IndexType> void apply(std::shared_ptr<const CudaExecutor> exec, size_type num_blocks, uint32 max_block_size, const preconditioner::block_interleaved_storage_scheme<IndexType> &storage_scheme, const Array<precision_reduction> &block_precisions, const Array<IndexType> &block_pointers, const Array<ValueType> &blocks, const matrix::Dense<ValueType> *alpha, const matrix::Dense<ValueType> *b, const matrix::Dense<ValueType> *beta, matrix::Dense<ValueType> *x) { // TODO: write a special kernel for multiple RHS dense::scale(exec, beta, x); for (size_type col = 0; col < b->get_size()[1]; ++col) { select_advanced_apply( compiled_kernels(), [&](int compiled_block_size) { return max_block_size <= compiled_block_size; }, syn::value_list<int, cuda_config::min_warps_per_block>(), syn::type_list<>(), num_blocks, block_precisions.get_const_data(), block_pointers.get_const_data(), blocks.get_const_data(), storage_scheme, alpha->get_const_values(), b->get_const_values() + col, b->get_stride(), x->get_values() + col, x->get_stride()); } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(GKO_DECLARE_JACOBI_APPLY_KERNEL); } // namespace jacobi } // namespace cuda } // namespace kernels } // namespace gko
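The launch geometry assigns one subwarp of subwarp_size threads to each diagonal block, where subwarp_size is the smallest power of two not below max_block_size. The worked example below uses assumed values (warp size 32, warps_per_block 4, a largest block of 13 rows) and local reimplementations of the helpers, purely to make the packing concrete.

// Worked example of the subwarp packing behind advanced_apply's launch geometry.
#include <cstdio>

constexpr int get_larger_power(int n, int p = 1) { return p >= n ? p : get_larger_power(n, 2 * p); }
constexpr int ceildiv(int a, int b) { return (a + b - 1) / b; }

int main() {
  constexpr int warp_size = 32, warps_per_block = 4;   // assumed typical values
  constexpr int max_block_size = 13;                   // largest Jacobi block in rows
  constexpr int subwarp_size = get_larger_power(max_block_size);  // -> 16
  constexpr int blocks_per_warp = warp_size / subwarp_size;       // -> 2
  int num_blocks = 1000;
  int grid_x = ceildiv(num_blocks, warps_per_block * blocks_per_warp);  // -> 125
  // Thread block shape is (subwarp_size, blocks_per_warp, warps_per_block),
  // i.e. 16 x 2 x 4 = 128 threads handling 8 Jacobi blocks per CUDA block.
  std::printf("subwarp=%d grid.x=%d\n", subwarp_size, grid_x);
  return 0;
}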
9befd6e2e8c07d41405fcab5af258f819ed3922a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include <string> //#include "../../common/cuda/profile_main.h" #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE RD_WG_SIZE #else #define BLOCK_SIZE 16 #endif #define STR_SIZE 256 /* maximum power density possible (say 300W for a 10mm x 10mm chip) */ #define MAX_PD (3.0e6) /* required precision in degrees */ #define PRECISION 0.001 #define SPEC_HEAT_SI 1.75e6 #define K_SI 100 /* capacitance fitting factor */ #define FACTOR_CHIP 0.5 /* chip parameters */ float t_chip = 0.0005; float chip_height = 0.016; float chip_width = 0.016; /* ambient temperature, assuming no package at all */ float amb_temp = 80.0; void run(int argc, char **argv); /* define timer macros */ #define pin_stats_reset() startCycle() #define pin_stats_pause(cycles) stopCycle(cycles) #define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles) void fatal(const char* s) { fprintf(stderr, "error: %s\n", s); } void writeoutput(float *vect, int grid_rows, int grid_cols, const std::string file) { int i, j, index = 0; FILE *fp; char str[STR_SIZE]; if ((fp = fopen(file.c_str(), "w")) == 0) printf("The file was not opened\n"); for (i = 0; i < grid_rows; i++) for (j = 0; j < grid_cols; j++) { sprintf(str, "%d\t%g\n", index, vect[i * grid_cols + j]); fputs(str, fp); index++; } fclose(fp); } void readinput(float *vect, int grid_rows, int grid_cols, char *file) { int i, j; FILE *fp; char str[STR_SIZE]; float val; if ((fp = fopen(file, "r")) == 0) printf("The file was not opened\n"); for (i = 0; i <= grid_rows - 1; i++) for (j = 0; j <= grid_cols - 1; j++) { fgets(str, STR_SIZE, fp); if (feof(fp)) fatal("not enough lines in file"); // if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != // ((i-1)*(grid_cols-2)+j-1))) if ((sscanf(str, "%f", &val) != 1)) fatal("invalid file format"); vect[i * grid_cols + j] = val; } fclose(fp); } #define IN_RANGE(x, min, max) ((x) >= (min) && (x) <= (max)) #define CLAMP_RANGE(x, min, max) x = (x < (min)) ? min : ((x > (max)) ? max : x) #define MIN(a, b) ((a) <= (b) ? (a) : (b)) __global__ void calculate_temp(int iteration, // number of iteration float *power, // power input float *temp_src, // temperature input/output float *temp_dst, // temperature input/output int grid_cols, // Col of grid int grid_rows, // Row of grid int border_cols, // border offset int border_rows, // border offset float Cap, // Capacitance float Rx, float Ry, float Rz, float step) { __shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result float amb_temp = 80.0; float step_div_Cap; float Rx_1, Ry_1, Rz_1; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; step_div_Cap = step / Cap; Rx_1 = 1 / Rx; Ry_1 = 1 / Ry; Rz_1 = 1 / Rz; // each block finally computes result for a small block // after N iterations. 
// it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_rows = BLOCK_SIZE - iteration * 2; // EXPAND_RATE int small_block_cols = BLOCK_SIZE - iteration * 2; // EXPAND_RATE // calculate the boundary for the block according to // the boundary of its small block int blkY = small_block_rows * by - border_rows; int blkX = small_block_cols * bx - border_cols; int blkYmax = blkY + BLOCK_SIZE - 1; int blkXmax = blkX + BLOCK_SIZE - 1; // calculate the global thread coordination int yidx = blkY + ty; int xidx = blkX + tx; // load data if it is within the valid input range int loadYidx = yidx, loadXidx = xidx; int index = grid_cols * loadYidx + loadXidx; if (IN_RANGE(loadYidx, 0, grid_rows - 1) && IN_RANGE(loadXidx, 0, grid_cols - 1)) { temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data // from global memory to shared // memory power_on_cuda[ty][tx] = power[index]; // Load the power data from global // memory to shared memory } __syncthreads(); // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validYmin = (blkY < 0) ? -blkY : 0; int validYmax = (blkYmax > grid_rows - 1) ? BLOCK_SIZE - 1 - (blkYmax - grid_rows + 1) : BLOCK_SIZE - 1; int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > grid_cols - 1) ? BLOCK_SIZE - 1 - (blkXmax - grid_cols + 1) : BLOCK_SIZE - 1; int N = ty - 1; int S = ty + 1; int W = tx - 1; int E = tx + 1; N = (N < validYmin) ? validYmin : N; S = (S > validYmax) ? validYmax : S; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool computed; for (int i = 0; i < iteration; i++) { computed = false; if (IN_RANGE(tx, i + 1, BLOCK_SIZE - i - 2) && IN_RANGE(ty, i + 1, BLOCK_SIZE - i - 2) && IN_RANGE(tx, validXmin, validXmax) && IN_RANGE(ty, validYmin, validYmax)) { computed = true; temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] + (temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0 * temp_on_cuda[ty][tx]) * Ry_1 + (temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0 * temp_on_cuda[ty][tx]) * Rx_1 + (amb_temp - temp_on_cuda[ty][tx]) * Rz_1); } __syncthreads(); if (i == iteration - 1) break; if (computed) // Assign the computation range temp_on_cuda[ty][tx] = temp_t[ty][tx]; __syncthreads(); } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed) { temp_dst[index] = temp_t[ty][tx]; } } /* compute N time steps */ int compute_tran_temp(float *MatrixPower, float *MatrixTemp[2], int col, int row, int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blockCols, blockRows); float grid_height = chip_height / row; float grid_width = chip_width / col; float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); float Rz = t_chip / (K_SI * grid_height * grid_width); float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); float step = PRECISION / max_slope; float t; int src = 1, dst = 0; for (t = 0; t < total_iterations; t += num_iterations) { int temp = src; src = dst; dst = temp; //PROFILE(( hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(num_iterations, 
total_iterations - t), MatrixPower, MatrixTemp[src], MatrixTemp[dst], col, row, borderCols, borderRows, Cap, Rx, Ry, Rz, step); //)); } return dst; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> " "<sim_time> <temp_file> <power_file> <output_file>\n", argv[0]); fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the " "grid (positive integer)\n"); fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n"); fprintf(stderr, "\t<sim_time> - number of iterations\n"); fprintf(stderr, "\t<temp_file> - name of the file containing the initial " "temperature values of each cell\n"); fprintf(stderr, "\t<power_file> - name of the file containing the " "dissipated power values of each cell\n"); fprintf(stderr, "\t<output_file> - name of the output file\n"); exit(1); } void real_main(int argc, char **argv) { printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); int size; int grid_rows, grid_cols; float *FilesavingTemp, *FilesavingPower, *MatrixOut; char *tfile, *pfile; int total_iterations = 60; int pyramid_height = 1; // number of iterations if (argc != 6) usage(argc, argv); if ((grid_rows = atoi(argv[1])) <= 0 || (grid_cols = atoi(argv[1])) <= 0 || (pyramid_height = atoi(argv[2])) <= 0 || (total_iterations = atoi(argv[3])) <= 0) usage(argc, argv); tfile = argv[4]; pfile = argv[5]; size = grid_rows * grid_cols; hipSetDevice(1); /* --------------- pyramid parameters --------------- */ #define EXPAND_RATE 2 // add one iteration will extend the pyramid base by 2 per each borderline int borderCols = (pyramid_height)*EXPAND_RATE / 2; int borderRows = (pyramid_height)*EXPAND_RATE / 2; int smallBlockCol = BLOCK_SIZE - (pyramid_height)*EXPAND_RATE; int smallBlockRow = BLOCK_SIZE - (pyramid_height)*EXPAND_RATE; int blockCols = grid_cols / smallBlockCol + ((grid_cols % smallBlockCol == 0) ? 0 : 1); int blockRows = grid_rows / smallBlockRow + ((grid_rows % smallBlockRow == 0) ? 
0 : 1); FilesavingTemp = (float *)malloc(size * sizeof(float)); FilesavingPower = (float *)malloc(size * sizeof(float)); MatrixOut = (float *)calloc(size, sizeof(float)); if (!FilesavingPower || !FilesavingTemp || !MatrixOut) fatal("unable to allocate memory"); printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, " "%d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n", pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow); readinput(FilesavingTemp, grid_rows, grid_cols, tfile); readinput(FilesavingPower, grid_rows, grid_cols, pfile); float *MatrixTemp[2], *MatrixPower; hipMalloc((void **)&MatrixTemp[0], sizeof(float) * size); hipMalloc((void **)&MatrixTemp[1], sizeof(float) * size); hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float) * size, hipMemcpyHostToDevice); hipMalloc((void **)&MatrixPower, sizeof(float) * size); hipMemcpy(MatrixPower, FilesavingPower, sizeof(float) * size, hipMemcpyHostToDevice); printf("Start computing the transient temperature\n"); int ret = compute_tran_temp(MatrixPower, MatrixTemp, grid_cols, grid_rows, total_iterations, pyramid_height, blockCols, blockRows, borderCols, borderRows); printf("Ending simulation\n"); hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float) * size, hipMemcpyDeviceToHost); if (getenv("OUTPUT")) writeoutput(MatrixOut, grid_rows, grid_cols, "output.txt"); hipFree(MatrixPower); hipFree(MatrixTemp[0]); hipFree(MatrixTemp[1]); free(MatrixOut); } int main(int argc, char **argv) { //if (getenv("PROFILE")) // profile_start(); real_main(argc, argv); //if (getenv("PROFILE")) // profile_stop(); return EXIT_SUCCESS; }
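real_main sizes the launch from the pyramid height: every time step fused into one kernel call costs EXPAND_RATE/2 halo cells per tile side, so the writable interior of each BLOCK_SIZE-wide tile shrinks and more blocks are needed to cover the grid. The sketch below simply replays that arithmetic with assumed values (BLOCK_SIZE 16, pyramid_height 2, a 1024x1024 grid).

// Worked example of the hotspot tiling arithmetic; all input values are assumptions.
#include <cstdio>

int main() {
  const int BLOCK_SIZE = 16, EXPAND_RATE = 2;
  int grid_rows = 1024, grid_cols = 1024, pyramid_height = 2;
  int borderRows = pyramid_height * EXPAND_RATE / 2;              // 2 halo rows per side
  int borderCols = pyramid_height * EXPAND_RATE / 2;              // 2 halo cols per side
  int smallBlockRow = BLOCK_SIZE - pyramid_height * EXPAND_RATE;  // 12 usable rows per tile
  int smallBlockCol = BLOCK_SIZE - pyramid_height * EXPAND_RATE;  // 12 usable cols per tile
  int blockRows = grid_rows / smallBlockRow + (grid_rows % smallBlockRow ? 1 : 0);  // 86
  int blockCols = grid_cols / smallBlockCol + (grid_cols % smallBlockCol ? 1 : 0);  // 86
  // Each 16x16 thread block loads a 16x16 tile but only writes its inner
  // 12x12 region after 2 fused time steps, so an 86x86 grid covers 1024x1024.
  std::printf("border=%d inner=%d blocks=%dx%d\n",
              borderRows, smallBlockRow, blockRows, blockCols);
  return 0;
}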
9befd6e2e8c07d41405fcab5af258f819ed3922a.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include <string> //#include "../../common/cuda/profile_main.h" #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE RD_WG_SIZE #else #define BLOCK_SIZE 16 #endif #define STR_SIZE 256 /* maximum power density possible (say 300W for a 10mm x 10mm chip) */ #define MAX_PD (3.0e6) /* required precision in degrees */ #define PRECISION 0.001 #define SPEC_HEAT_SI 1.75e6 #define K_SI 100 /* capacitance fitting factor */ #define FACTOR_CHIP 0.5 /* chip parameters */ float t_chip = 0.0005; float chip_height = 0.016; float chip_width = 0.016; /* ambient temperature, assuming no package at all */ float amb_temp = 80.0; void run(int argc, char **argv); /* define timer macros */ #define pin_stats_reset() startCycle() #define pin_stats_pause(cycles) stopCycle(cycles) #define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles) void fatal(const char* s) { fprintf(stderr, "error: %s\n", s); } void writeoutput(float *vect, int grid_rows, int grid_cols, const std::string file) { int i, j, index = 0; FILE *fp; char str[STR_SIZE]; if ((fp = fopen(file.c_str(), "w")) == 0) printf("The file was not opened\n"); for (i = 0; i < grid_rows; i++) for (j = 0; j < grid_cols; j++) { sprintf(str, "%d\t%g\n", index, vect[i * grid_cols + j]); fputs(str, fp); index++; } fclose(fp); } void readinput(float *vect, int grid_rows, int grid_cols, char *file) { int i, j; FILE *fp; char str[STR_SIZE]; float val; if ((fp = fopen(file, "r")) == 0) printf("The file was not opened\n"); for (i = 0; i <= grid_rows - 1; i++) for (j = 0; j <= grid_cols - 1; j++) { fgets(str, STR_SIZE, fp); if (feof(fp)) fatal("not enough lines in file"); // if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != // ((i-1)*(grid_cols-2)+j-1))) if ((sscanf(str, "%f", &val) != 1)) fatal("invalid file format"); vect[i * grid_cols + j] = val; } fclose(fp); } #define IN_RANGE(x, min, max) ((x) >= (min) && (x) <= (max)) #define CLAMP_RANGE(x, min, max) x = (x < (min)) ? min : ((x > (max)) ? max : x) #define MIN(a, b) ((a) <= (b) ? (a) : (b)) __global__ void calculate_temp(int iteration, // number of iteration float *power, // power input float *temp_src, // temperature input/output float *temp_dst, // temperature input/output int grid_cols, // Col of grid int grid_rows, // Row of grid int border_cols, // border offset int border_rows, // border offset float Cap, // Capacitance float Rx, float Ry, float Rz, float step) { __shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result float amb_temp = 80.0; float step_div_Cap; float Rx_1, Ry_1, Rz_1; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; step_div_Cap = step / Cap; Rx_1 = 1 / Rx; Ry_1 = 1 / Ry; Rz_1 = 1 / Rz; // each block finally computes result for a small block // after N iterations. 
// it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_rows = BLOCK_SIZE - iteration * 2; // EXPAND_RATE int small_block_cols = BLOCK_SIZE - iteration * 2; // EXPAND_RATE // calculate the boundary for the block according to // the boundary of its small block int blkY = small_block_rows * by - border_rows; int blkX = small_block_cols * bx - border_cols; int blkYmax = blkY + BLOCK_SIZE - 1; int blkXmax = blkX + BLOCK_SIZE - 1; // calculate the global thread coordination int yidx = blkY + ty; int xidx = blkX + tx; // load data if it is within the valid input range int loadYidx = yidx, loadXidx = xidx; int index = grid_cols * loadYidx + loadXidx; if (IN_RANGE(loadYidx, 0, grid_rows - 1) && IN_RANGE(loadXidx, 0, grid_cols - 1)) { temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data // from global memory to shared // memory power_on_cuda[ty][tx] = power[index]; // Load the power data from global // memory to shared memory } __syncthreads(); // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validYmin = (blkY < 0) ? -blkY : 0; int validYmax = (blkYmax > grid_rows - 1) ? BLOCK_SIZE - 1 - (blkYmax - grid_rows + 1) : BLOCK_SIZE - 1; int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > grid_cols - 1) ? BLOCK_SIZE - 1 - (blkXmax - grid_cols + 1) : BLOCK_SIZE - 1; int N = ty - 1; int S = ty + 1; int W = tx - 1; int E = tx + 1; N = (N < validYmin) ? validYmin : N; S = (S > validYmax) ? validYmax : S; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool computed; for (int i = 0; i < iteration; i++) { computed = false; if (IN_RANGE(tx, i + 1, BLOCK_SIZE - i - 2) && IN_RANGE(ty, i + 1, BLOCK_SIZE - i - 2) && IN_RANGE(tx, validXmin, validXmax) && IN_RANGE(ty, validYmin, validYmax)) { computed = true; temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] + (temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0 * temp_on_cuda[ty][tx]) * Ry_1 + (temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0 * temp_on_cuda[ty][tx]) * Rx_1 + (amb_temp - temp_on_cuda[ty][tx]) * Rz_1); } __syncthreads(); if (i == iteration - 1) break; if (computed) // Assign the computation range temp_on_cuda[ty][tx] = temp_t[ty][tx]; __syncthreads(); } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed) { temp_dst[index] = temp_t[ty][tx]; } } /* compute N time steps */ int compute_tran_temp(float *MatrixPower, float *MatrixTemp[2], int col, int row, int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blockCols, blockRows); float grid_height = chip_height / row; float grid_width = chip_width / col; float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); float Rz = t_chip / (K_SI * grid_height * grid_width); float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); float step = PRECISION / max_slope; float t; int src = 1, dst = 0; for (t = 0; t < total_iterations; t += num_iterations) { int temp = src; src = dst; dst = temp; //PROFILE(( calculate_temp<<<dimGrid, dimBlock>>>( MIN(num_iterations, total_iterations - t), MatrixPower, 
MatrixTemp[src], MatrixTemp[dst], col, row, borderCols, borderRows, Cap, Rx, Ry, Rz, step); //)); } return dst; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> " "<sim_time> <temp_file> <power_file> <output_file>\n", argv[0]); fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the " "grid (positive integer)\n"); fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n"); fprintf(stderr, "\t<sim_time> - number of iterations\n"); fprintf(stderr, "\t<temp_file> - name of the file containing the initial " "temperature values of each cell\n"); fprintf(stderr, "\t<power_file> - name of the file containing the " "dissipated power values of each cell\n"); fprintf(stderr, "\t<output_file> - name of the output file\n"); exit(1); } void real_main(int argc, char **argv) { printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); int size; int grid_rows, grid_cols; float *FilesavingTemp, *FilesavingPower, *MatrixOut; char *tfile, *pfile; int total_iterations = 60; int pyramid_height = 1; // number of iterations if (argc != 6) usage(argc, argv); if ((grid_rows = atoi(argv[1])) <= 0 || (grid_cols = atoi(argv[1])) <= 0 || (pyramid_height = atoi(argv[2])) <= 0 || (total_iterations = atoi(argv[3])) <= 0) usage(argc, argv); tfile = argv[4]; pfile = argv[5]; size = grid_rows * grid_cols; cudaSetDevice(1); /* --------------- pyramid parameters --------------- */ #define EXPAND_RATE 2 // add one iteration will extend the pyramid base by 2 per each borderline int borderCols = (pyramid_height)*EXPAND_RATE / 2; int borderRows = (pyramid_height)*EXPAND_RATE / 2; int smallBlockCol = BLOCK_SIZE - (pyramid_height)*EXPAND_RATE; int smallBlockRow = BLOCK_SIZE - (pyramid_height)*EXPAND_RATE; int blockCols = grid_cols / smallBlockCol + ((grid_cols % smallBlockCol == 0) ? 0 : 1); int blockRows = grid_rows / smallBlockRow + ((grid_rows % smallBlockRow == 0) ? 
0 : 1); FilesavingTemp = (float *)malloc(size * sizeof(float)); FilesavingPower = (float *)malloc(size * sizeof(float)); MatrixOut = (float *)calloc(size, sizeof(float)); if (!FilesavingPower || !FilesavingTemp || !MatrixOut) fatal("unable to allocate memory"); printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, " "%d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n", pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow); readinput(FilesavingTemp, grid_rows, grid_cols, tfile); readinput(FilesavingPower, grid_rows, grid_cols, pfile); float *MatrixTemp[2], *MatrixPower; cudaMalloc((void **)&MatrixTemp[0], sizeof(float) * size); cudaMalloc((void **)&MatrixTemp[1], sizeof(float) * size); cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float) * size, cudaMemcpyHostToDevice); cudaMalloc((void **)&MatrixPower, sizeof(float) * size); cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float) * size, cudaMemcpyHostToDevice); printf("Start computing the transient temperature\n"); int ret = compute_tran_temp(MatrixPower, MatrixTemp, grid_cols, grid_rows, total_iterations, pyramid_height, blockCols, blockRows, borderCols, borderRows); printf("Ending simulation\n"); cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float) * size, cudaMemcpyDeviceToHost); if (getenv("OUTPUT")) writeoutput(MatrixOut, grid_rows, grid_cols, "output.txt"); cudaFree(MatrixPower); cudaFree(MatrixTemp[0]); cudaFree(MatrixTemp[1]); free(MatrixOut); } int main(int argc, char **argv) { //if (getenv("PROFILE")) // profile_start(); real_main(argc, argv); //if (getenv("PROFILE")) // profile_stop(); return EXIT_SUCCESS; }
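Inside the valid region, calculate_temp applies a five-point stencil: each cell moves toward its vertical and horizontal neighbours through Ry and Rx, toward the ambient temperature through Rz, and absorbs its own power input, all scaled by step/Cap. A single-step CPU reference is sketched below; clamping the neighbour indices at the borders is a simplification assumed here, whereas the kernel handles borders through its valid-range logic.

// CPU reference for one hotspot time step on an R x C grid.
#include <algorithm>
#include <vector>

void hotspot_step_reference(int R, int C, const std::vector<float>& power,
                            const std::vector<float>& t_in, std::vector<float>& t_out,
                            float Cap, float Rx, float Ry, float Rz,
                            float step, float amb_temp) {
  const float sdc = step / Cap, Rx_1 = 1.f / Rx, Ry_1 = 1.f / Ry, Rz_1 = 1.f / Rz;
  for (int r = 0; r < R; ++r) {
    for (int c = 0; c < C; ++c) {
      int N = std::max(r - 1, 0), S = std::min(r + 1, R - 1);  // clamped neighbours
      int W = std::max(c - 1, 0), E = std::min(c + 1, C - 1);
      float t = t_in[r * C + c];
      t_out[r * C + c] = t + sdc * (power[r * C + c]
          + (t_in[S * C + c] + t_in[N * C + c] - 2.f * t) * Ry_1
          + (t_in[r * C + E] + t_in[r * C + W] - 2.f * t) * Rx_1
          + (amb_temp - t) * Rz_1);
    }
  }
}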
dd3700e84eb7149a6cdd66013c52b037b01b4c7d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" using namespace std; #define min(x,y) ((x)<(y)?(x):(y)) #define max(x,y) ((x)>(y)?(x):(y)) #define dist(x,y) ((x-y)*(x-y)) #define INF 1e10 //Pseudo Infitinte number for this code /// Calculate quick lower bound /// Usually, LB_Kim take time O(m) for finding top,bottom,fist and last. /// However, because of z-normalization the top and bottom cannot give siginifant benefits. /// And using the first and last points can be computed in constant time. /// The prunning power of LB_Kim is non-trivial, especially when the query is not long, say in length 128. /////////////////////Added to use constant memory/////////////// //extern __constant__ double q[]; ////////////////////////////////////////////////////// ///////////////////// Added to include locs /////////////////// __device__ int lock_Variable = 0; //0 loc open, 1 closed ///////////////////////////////////// __device__ double lb_kim_hierarchy(double *t, double *q, int j, int len, double mean, double std, double bsf = INF) { /// 1 point at front and back double d, lb; double x0 = (t[j] - mean) / std; double y0 = (t[(len - 1 + j)] - mean) / std; lb = dist(x0,q[0]) + dist(y0,q[len-1]); if (lb >= bsf) return lb; /// 2 points at front double x1 = (t[(j + 1)] - mean) / std; d = min(dist(x1,q[0]), dist(x0,q[1])); d = min(d, dist(x1,q[1])); lb += d; if (lb >= bsf) return lb; /// 2 points at back double y1 = (t[(len - 2 + j)] - mean) / std; d = min(dist(y1,q[len-1]), dist(y0, q[len-2]) ); d = min(d, dist(y1,q[len-2])); lb += d; if (lb >= bsf) return lb; /// 3 points at front double x2 = (t[(j + 2)] - mean) / std; d = min(dist(x0,q[2]), dist(x1, q[2])); d = min(d, dist(x2,q[2])); d = min(d, dist(x2,q[1])); d = min(d, dist(x2,q[0])); lb += d; if (lb >= bsf) return lb; /// 3 points at back double y2 = (t[(len - 3 + j)] - mean) / std; d = min(dist(y0,q[len-3]), dist(y1, q[len-3])); d = min(d, dist(y2,q[len-3])); d = min(d, dist(y2,q[len-2])); d = min(d, dist(y2,q[len-1])); lb += d; return lb; } __device__ double dtw(double* A, double* B, int m, int r, double* costM, double* cost_prevM, int bsfindex, double bsf = INF) { double *cost_tmp; int i, j, k; double x, y, z, min_cost; int start = bsfindex * (2 * r + 1); double* cost = costM + start; double*cost_prev = cost_prevM + start; /// Instead of using matrix of size O(m^2) or O(mr), we will reuse two array of size O(r). // hipMalloc((void**)&cost, (2*r+1) * sizeof(double)); for (k = 0; k < 2 * r + 1; k++) cost[k] = INF; // hipMalloc((void**)&cost_prev, (2*r+1) * sizeof(double)); for (k = 0; k < 2 * r + 1; k++) cost_prev[k] = INF; for (i = 0; i < m; i++) { k = max(0,r-i); min_cost = INF; for (j = max(0,i-r); j <= min(m-1,i+r); j++, k++) { /// Initialize all row and column if ((i == 0) && (j == 0)) { cost[k] = dist(A[0],B[0]); min_cost = cost[k]; continue; } if ((j - 1 < 0) || (k - 1 < 0)) y = INF; else y = cost[k - 1]; if ((i - 1 < 0) || (k + 1 > 2 * r)) x = INF; else x = cost_prev[k + 1]; if ((i - 1 < 0) || (j - 1 < 0)) z = INF; else z = cost_prev[k]; /// Classic DTW calculation cost[k] = min( min( x, y) , z) + dist(A[i],B[j]); /// Find minimum cost in row for early abandoning (possibly to use column instead of row). 
if (cost[k] < min_cost) { min_cost = cost[k]; } } /// We can abandon early if the current cummulative distace with lower bound together are larger than bsf if (i + r < m - 1 && min_cost >= bsf) { return min_cost; } /// Move current array to previous array. cost_tmp = cost; cost = cost_prev; cost_prev = cost_tmp; } k--; /// the DTW distance is in the last cell in the matrix of size O(m^2) or at the middle of our array. double final_dtw = cost_prev[k]; return final_dtw; } __global__ void processKernel(double* queue, double* buffer, double* cost, double* cost_prev, double* bsf_a, int* loc_a, double* tM, double* tzM, int m, int r, double bsf, int size, int EPOCH) { extern __shared__ double q[]; int shared_index = threadIdx.x; while(shared_index < m){ q[shared_index] = queue[shared_index]; shared_index += blockDim.x; } //printf("Hello"); int N = gridDim.x; int M = blockDim.x; int i = blockIdx.x; int j = threadIdx.x; int items_per_a = EPOCH / (N * M); int maxindex = (size - 1) / items_per_a; double lb_kim; int bsfindex = i * M + j; int sindex = bsfindex * items_per_a; int loc; int k; double d; double *t, *tz; double ex, ex2, mean, std, dist; t = tM + bsfindex * 2 * m; tz = tzM + bsfindex * 2 * m; /// Initial the cummulative lower bound ex = 0; ex2 = 0; int offset = m; if (bsfindex == maxindex) offset = 0; if (bsfindex <= maxindex) for (i = 0; i < items_per_a + offset; i++) { d = (double) buffer[sindex + i]; ex += d; ex2 += d * d; t[i % m] = d; t[(i % m) + m] = d; /// If there is enough data in t, the DTW distance can be calculated if (i >= m - 1) { mean = ex / m; std = ex2 / m; std = sqrt(std - mean * mean); /// compute the start location of the data in the current circular array, t j = (i + 1) % m; /// Use a constant lower bound to prune the obvious subsequence lb_kim = lb_kim_hierarchy(t, q, j, m, mean, std, bsf); if (lb_kim < bsf) { for (k = 0; k < m; k++) { tz[k] = (t[(k + j)] - mean) / std; } dist = dtw(tz, q, m, r, cost, cost_prev, bsfindex, bsf); ////////////////////////////// Implementing locks ////////////////////////////////// ///Previous code // // if (dist < bsf) { /// Update bsf // /// loc is the real starting location of the nearest neighbor in the file // bsf = dist; // loc = sindex + i; // } /////////End of previous code ///////// Implementing loc if (dist < bsf) { bool loop = true; while (loop) { if (atomicCAS(&lock_Variable, 0, 1)) { //If loc open (loc == 0) then close it (make it equal to 1) if (dist < bsf) { bsf = dist; loc = sindex + i; } lock_Variable = 0; loop = false; } } } /////////////////////////////////////////////////////////////////////////////////// } /// Reduce obsolute points from sum and sum square ex -= t[j]; ex2 -= t[j] * t[j]; } } bsf_a[bsfindex] = bsf; loc_a[bsfindex] = loc; //Some issue which popped up now . 
} void error(int id) { if (id == 1) printf("ERROR : Memory can't be allocated!!!\n\n"); else if (id == 2) printf("ERROR : File not Found!!!\n\n"); else if (id == 3) printf("ERROR : Can't create Output File!!!\n\n"); else if (id == 4) { printf("ERROR : Invalid Number of Arguments!!!\n"); printf( "Command Usage: UCR_DTW.exe data-file query-file m R\n\n"); printf( "For example : UCR_DTW.exe data.txt query.txt 128 0.05\n"); } exit(1); } /// Main Function int main(int argc, char *argv[]) { FILE *fp; /// data file pointer FILE *qp; /// query file pointer double bsf = INF; /// best-so-far double *h_q; /// data array and query array clock_t begin, end; double time_spent; double d; long long i; double ex, ex2, mean, std; int m = -1, r = -1; long long loc = 0; // double t1//, t2; //int kim = 0, keogh = 0, keogh2 = 0; double *h_buffer; int N = 10, M = 100; int sh= 0; /// For every EPOCH points, all cummulative values, such as ex (sum), ex2 (sum square), will be restarted for reducing the doubleing point error. int EPOCH = 1000000; int epoch; //Optimization /// If not enough input, display an error. if (argc <= 3) error(4); /// read size of the query if (argc > 3) m = atol(argv[3]); /// read warping windows if (argc > 4) { double R = atof(argv[4]); if (R <= 1) r = floor(R * m); else r = floor(R); } if (argc > 7) { N = atoi(argv[5]); M = atoi(argv[6]); EPOCH = atol(argv[7]); } // m = 128; // r = 6; fp = fopen(argv[1], "rb"); // fp = fopen("/home/ubuntu/Desktop/DTW Project/Executable/Data.txt", "r"); // if( fp == NULL ) // error(2); qp = fopen(argv[2], "r"); // qp = fopen("/home/ubuntu/Desktop/DTW Project/Executable/Query.txt", "r"); // if( qp == NULL ) // error(2); /// start the clock //t1 = clock(); /// malloc everything here h_q = (double *) malloc(sizeof(double) * m); if (h_q == NULL) error(1); h_buffer = (double *) malloc(sizeof(double) * (EPOCH)); if (h_buffer == NULL) error(1); /// Read query file bsf = INF; i = 0; ex = ex2 = 0; while (fscanf(qp, "%lf", &d) != EOF && i < m) { ex += d; ex2 += d * d; h_q[i] = d; i++; } fclose(qp); /// Do z-normalize the query, keep in same array, q mean = ex / m; std = ex2 / m; std = sqrt(std - mean * mean); for (i = 0; i < m; i++) h_q[i] = (h_q[i] - mean) / std; int size = N * M; double* h_bsf = (double *) malloc(sizeof(double) * size); int* h_loc = (int *) malloc(sizeof(int) * size); for (i = 0; i < size; i++) { h_bsf[i] = INF; h_loc[i] = 0; } //Allocate all the cuda Stuffs double *d_q; double *d_buffer, *d_bsf; double *d_cost, *d_cost_prev; double *d_t, *d_tz; int* d_loc; hipMalloc((void**) &d_buffer, (EPOCH) * sizeof(double)); hipMalloc((void**) &d_cost, (2 * r + 1) * size * sizeof(double)); hipMalloc((void**) &d_cost_prev, (2 * r + 1) * size * sizeof(double)); hipMalloc((void**) &d_bsf, size * sizeof(double)); hipMalloc((void**) &d_t, 2 * m * size * sizeof(double)); hipMalloc((void**) &d_tz, 2 * m * size * sizeof(double)); hipMalloc((void**) &d_q, m * sizeof(double)); hipMalloc((void**) &d_loc, size * sizeof(int)); ///Copying BSF array hipMemcpy(d_bsf, h_bsf, m * sizeof(double), hipMemcpyHostToDevice); ///Copy all the Query related arrays hipMemcpy(d_q, h_q, m * sizeof(double), hipMemcpyHostToDevice); bool done = false; bool last = false; int it = 0, ep = 0, k = 0; //begin = clock(); while (!done) { /// Read first m-1 points if (it == 0) { epoch = 100000; while (ep < epoch) { if (!fread(&d,sizeof(double),1,fp)){ //printf("%lf\n",d ); break; } h_buffer[ep] = d; ep++; } } /// Data are read in chunk of size EPOCH. 
/// When there is nothing to read, the loop is end. if (ep <= m - 1) { done = true; } else { if (last) { done = true; } //printf("Reading Done.\n"); sh ++; //begin = clock(); hipMemcpy(d_buffer, h_buffer, ep * sizeof(double), hipMemcpyHostToDevice); // to copy from CPU to GPU hipDeviceSynchronize(); //end = clock(); //time_spent = (double) (end - begin) / CLOCKS_PER_SEC; //printf("Time taken by memcpy for reading buffer %lf ", time_spent); /// Just for printing a dot for approximate a million point. Not much accurate. // printf("Copying done.\n"); //Do everything here begin = clock(); hipLaunchKernelGGL(( processKernel), dim3(N), dim3(M),m*sizeof(double), 0, d_q, d_buffer, d_cost, d_cost_prev, d_bsf, d_loc, d_t, d_tz, m, r, bsf, ep, EPOCH); hipDeviceSynchronize(); end = clock(); time_spent = (double) (end - begin) / CLOCKS_PER_SEC; printf("Time taken by kernel %lf, ", time_spent); //Do the next set of buffering // printf("Kernel done.\n"); epoch = EPOCH; ep = 0; begin = clock(); while (ep < epoch) { if (!fread(&d,sizeof(double),1,fp)) { last = true; break; } h_buffer[ep] = d; ep++; } end = clock(); time_spent = (double) (end - begin) / CLOCKS_PER_SEC; printf("Time taken for reading %lf\n ", time_spent); //printf("Loading next set done\n"); //begin = clock(); hipMemcpy(h_bsf, d_bsf, size * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(h_loc, d_loc, size * sizeof(int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); // end = clock(); // time_spent = (double) (end - begin) / CLOCKS_PER_SEC; // printf("Time taken for memcpy %lf", time_spent); // printf("computation"); // begin = clock(); for (k = 0; k < size; k++) { if (bsf > h_bsf[k]) { bsf = h_bsf[k]; if (it == 0) { loc = (it) * (EPOCH) + h_loc[k] - m + 1; } else { loc = 100000 + (it - 1) * (EPOCH) + h_loc[k] - m + 1; } } } // end = clock(); // time_spent = (double) (end - begin) / CLOCKS_PER_SEC; // printf("Time taken for computation %lf \n", time_spent); // printf("Computation Done.\n"); /// If the size of last chunk is less then EPOCH, then no more data and terminate. } it++; } //end = clock(); //time_spent = (double) (end - begin) / CLOCKS_PER_SEC; //printf("\nTime taken %lf ", time_spent); fclose(fp); free(h_q); free(h_buffer); free(h_bsf); hipFree(d_buffer); hipFree(d_q); hipFree(d_bsf); hipFree(d_loc); hipFree(d_cost); hipFree(d_cost_prev); hipFree(d_t); hipFree(d_tz); //t2 = clock(); printf("\n"); /// Note that loc and i are long long. // cout << "Location : " << loc << endl; // cout << "Distance : " << sqrt(bsf) << endl; // cout << "Data Scanned : " << i << endl; // cout << "Total Execution Time : " << (t2-t1)/CLOCKS_PER_SEC << " sec" << endl; /// printf is just easier for formating ;) printf("Distance %lf\n", sqrt(bsf)); printf("Location %lld\n", loc); printf("No of iterations %d\n", sh); return 0; }
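The best-so-far update inside processKernel is serialized through a global spin lock. Two details matter when writing this pattern: atomicCAS returns the old value, so the lock is acquired only when that return value is 0, and the guarded variables must live in memory the threads actually share (in the kernel above, bsf and loc are per-thread locals, and each thread publishes its own result through bsf_a and loc_a at the end). A minimal sketch of the conventional lock-protected update follows; g_bsf and g_loc are hypothetical device globals standing in for a genuinely shared result, and the sketch is illustrative rather than the file's exact code.

// Device-side spin lock around a best-so-far update (sketch).
// lock_Variable mirrors the file's __device__ int: 0 = open, 1 = held.
__device__ int lock_Variable = 0;
__device__ double g_bsf = 1e10;        // hypothetical shared best-so-far
__device__ long long g_loc = 0;        // hypothetical shared location

__device__ void update_best(double dist, long long candidate_loc)
{
    bool done = false;
    while (!done) {
        // atomicCAS returns the OLD value: 0 means this thread just took the lock.
        if (atomicCAS(&lock_Variable, 0, 1) == 0) {
            if (dist < g_bsf) {              // re-check under the lock
                g_bsf = dist;
                g_loc = candidate_loc;
            }
            __threadfence();                 // publish before releasing
            atomicExch(&lock_Variable, 0);   // release
            done = true;
        }
    }
}

Keeping the acquire attempt inside the if, as the kernel above does, also avoids the intra-warp live-lock that a bare while (atomicCAS(...) != 0) spin can cause on pre-Volta GPUs.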
dd3700e84eb7149a6cdd66013c52b037b01b4c7d.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" using namespace std; #define min(x,y) ((x)<(y)?(x):(y)) #define max(x,y) ((x)>(y)?(x):(y)) #define dist(x,y) ((x-y)*(x-y)) #define INF 1e10 //Pseudo Infitinte number for this code /// Calculate quick lower bound /// Usually, LB_Kim take time O(m) for finding top,bottom,fist and last. /// However, because of z-normalization the top and bottom cannot give siginifant benefits. /// And using the first and last points can be computed in constant time. /// The prunning power of LB_Kim is non-trivial, especially when the query is not long, say in length 128. /////////////////////Added to use constant memory/////////////// //extern __constant__ double q[]; ////////////////////////////////////////////////////// ///////////////////// Added to include locs /////////////////// __device__ int lock_Variable = 0; //0 loc open, 1 closed ///////////////////////////////////// __device__ double lb_kim_hierarchy(double *t, double *q, int j, int len, double mean, double std, double bsf = INF) { /// 1 point at front and back double d, lb; double x0 = (t[j] - mean) / std; double y0 = (t[(len - 1 + j)] - mean) / std; lb = dist(x0,q[0]) + dist(y0,q[len-1]); if (lb >= bsf) return lb; /// 2 points at front double x1 = (t[(j + 1)] - mean) / std; d = min(dist(x1,q[0]), dist(x0,q[1])); d = min(d, dist(x1,q[1])); lb += d; if (lb >= bsf) return lb; /// 2 points at back double y1 = (t[(len - 2 + j)] - mean) / std; d = min(dist(y1,q[len-1]), dist(y0, q[len-2]) ); d = min(d, dist(y1,q[len-2])); lb += d; if (lb >= bsf) return lb; /// 3 points at front double x2 = (t[(j + 2)] - mean) / std; d = min(dist(x0,q[2]), dist(x1, q[2])); d = min(d, dist(x2,q[2])); d = min(d, dist(x2,q[1])); d = min(d, dist(x2,q[0])); lb += d; if (lb >= bsf) return lb; /// 3 points at back double y2 = (t[(len - 3 + j)] - mean) / std; d = min(dist(y0,q[len-3]), dist(y1, q[len-3])); d = min(d, dist(y2,q[len-3])); d = min(d, dist(y2,q[len-2])); d = min(d, dist(y2,q[len-1])); lb += d; return lb; } __device__ double dtw(double* A, double* B, int m, int r, double* costM, double* cost_prevM, int bsfindex, double bsf = INF) { double *cost_tmp; int i, j, k; double x, y, z, min_cost; int start = bsfindex * (2 * r + 1); double* cost = costM + start; double*cost_prev = cost_prevM + start; /// Instead of using matrix of size O(m^2) or O(mr), we will reuse two array of size O(r). // cudaMalloc((void**)&cost, (2*r+1) * sizeof(double)); for (k = 0; k < 2 * r + 1; k++) cost[k] = INF; // cudaMalloc((void**)&cost_prev, (2*r+1) * sizeof(double)); for (k = 0; k < 2 * r + 1; k++) cost_prev[k] = INF; for (i = 0; i < m; i++) { k = max(0,r-i); min_cost = INF; for (j = max(0,i-r); j <= min(m-1,i+r); j++, k++) { /// Initialize all row and column if ((i == 0) && (j == 0)) { cost[k] = dist(A[0],B[0]); min_cost = cost[k]; continue; } if ((j - 1 < 0) || (k - 1 < 0)) y = INF; else y = cost[k - 1]; if ((i - 1 < 0) || (k + 1 > 2 * r)) x = INF; else x = cost_prev[k + 1]; if ((i - 1 < 0) || (j - 1 < 0)) z = INF; else z = cost_prev[k]; /// Classic DTW calculation cost[k] = min( min( x, y) , z) + dist(A[i],B[j]); /// Find minimum cost in row for early abandoning (possibly to use column instead of row). 
if (cost[k] < min_cost) { min_cost = cost[k]; } } /// We can abandon early if the current cummulative distace with lower bound together are larger than bsf if (i + r < m - 1 && min_cost >= bsf) { return min_cost; } /// Move current array to previous array. cost_tmp = cost; cost = cost_prev; cost_prev = cost_tmp; } k--; /// the DTW distance is in the last cell in the matrix of size O(m^2) or at the middle of our array. double final_dtw = cost_prev[k]; return final_dtw; } __global__ void processKernel(double* queue, double* buffer, double* cost, double* cost_prev, double* bsf_a, int* loc_a, double* tM, double* tzM, int m, int r, double bsf, int size, int EPOCH) { extern __shared__ double q[]; int shared_index = threadIdx.x; while(shared_index < m){ q[shared_index] = queue[shared_index]; shared_index += blockDim.x; } //printf("Hello"); int N = gridDim.x; int M = blockDim.x; int i = blockIdx.x; int j = threadIdx.x; int items_per_a = EPOCH / (N * M); int maxindex = (size - 1) / items_per_a; double lb_kim; int bsfindex = i * M + j; int sindex = bsfindex * items_per_a; int loc; int k; double d; double *t, *tz; double ex, ex2, mean, std, dist; t = tM + bsfindex * 2 * m; tz = tzM + bsfindex * 2 * m; /// Initial the cummulative lower bound ex = 0; ex2 = 0; int offset = m; if (bsfindex == maxindex) offset = 0; if (bsfindex <= maxindex) for (i = 0; i < items_per_a + offset; i++) { d = (double) buffer[sindex + i]; ex += d; ex2 += d * d; t[i % m] = d; t[(i % m) + m] = d; /// If there is enough data in t, the DTW distance can be calculated if (i >= m - 1) { mean = ex / m; std = ex2 / m; std = sqrt(std - mean * mean); /// compute the start location of the data in the current circular array, t j = (i + 1) % m; /// Use a constant lower bound to prune the obvious subsequence lb_kim = lb_kim_hierarchy(t, q, j, m, mean, std, bsf); if (lb_kim < bsf) { for (k = 0; k < m; k++) { tz[k] = (t[(k + j)] - mean) / std; } dist = dtw(tz, q, m, r, cost, cost_prev, bsfindex, bsf); ////////////////////////////// Implementing locks ////////////////////////////////// ///Previous code // // if (dist < bsf) { /// Update bsf // /// loc is the real starting location of the nearest neighbor in the file // bsf = dist; // loc = sindex + i; // } /////////End of previous code ///////// Implementing loc if (dist < bsf) { bool loop = true; while (loop) { if (atomicCAS(&lock_Variable, 0, 1)) { //If loc open (loc == 0) then close it (make it equal to 1) if (dist < bsf) { bsf = dist; loc = sindex + i; } lock_Variable = 0; loop = false; } } } /////////////////////////////////////////////////////////////////////////////////// } /// Reduce obsolute points from sum and sum square ex -= t[j]; ex2 -= t[j] * t[j]; } } bsf_a[bsfindex] = bsf; loc_a[bsfindex] = loc; //Some issue which popped up now . 
} void error(int id) { if (id == 1) printf("ERROR : Memory can't be allocated!!!\n\n"); else if (id == 2) printf("ERROR : File not Found!!!\n\n"); else if (id == 3) printf("ERROR : Can't create Output File!!!\n\n"); else if (id == 4) { printf("ERROR : Invalid Number of Arguments!!!\n"); printf( "Command Usage: UCR_DTW.exe data-file query-file m R\n\n"); printf( "For example : UCR_DTW.exe data.txt query.txt 128 0.05\n"); } exit(1); } /// Main Function int main(int argc, char *argv[]) { FILE *fp; /// data file pointer FILE *qp; /// query file pointer double bsf = INF; /// best-so-far double *h_q; /// data array and query array clock_t begin, end; double time_spent; double d; long long i; double ex, ex2, mean, std; int m = -1, r = -1; long long loc = 0; // double t1//, t2; //int kim = 0, keogh = 0, keogh2 = 0; double *h_buffer; int N = 10, M = 100; int sh= 0; /// For every EPOCH points, all cummulative values, such as ex (sum), ex2 (sum square), will be restarted for reducing the doubleing point error. int EPOCH = 1000000; int epoch; //Optimization /// If not enough input, display an error. if (argc <= 3) error(4); /// read size of the query if (argc > 3) m = atol(argv[3]); /// read warping windows if (argc > 4) { double R = atof(argv[4]); if (R <= 1) r = floor(R * m); else r = floor(R); } if (argc > 7) { N = atoi(argv[5]); M = atoi(argv[6]); EPOCH = atol(argv[7]); } // m = 128; // r = 6; fp = fopen(argv[1], "rb"); // fp = fopen("/home/ubuntu/Desktop/DTW Project/Executable/Data.txt", "r"); // if( fp == NULL ) // error(2); qp = fopen(argv[2], "r"); // qp = fopen("/home/ubuntu/Desktop/DTW Project/Executable/Query.txt", "r"); // if( qp == NULL ) // error(2); /// start the clock //t1 = clock(); /// malloc everything here h_q = (double *) malloc(sizeof(double) * m); if (h_q == NULL) error(1); h_buffer = (double *) malloc(sizeof(double) * (EPOCH)); if (h_buffer == NULL) error(1); /// Read query file bsf = INF; i = 0; ex = ex2 = 0; while (fscanf(qp, "%lf", &d) != EOF && i < m) { ex += d; ex2 += d * d; h_q[i] = d; i++; } fclose(qp); /// Do z-normalize the query, keep in same array, q mean = ex / m; std = ex2 / m; std = sqrt(std - mean * mean); for (i = 0; i < m; i++) h_q[i] = (h_q[i] - mean) / std; int size = N * M; double* h_bsf = (double *) malloc(sizeof(double) * size); int* h_loc = (int *) malloc(sizeof(int) * size); for (i = 0; i < size; i++) { h_bsf[i] = INF; h_loc[i] = 0; } //Allocate all the cuda Stuffs double *d_q; double *d_buffer, *d_bsf; double *d_cost, *d_cost_prev; double *d_t, *d_tz; int* d_loc; cudaMalloc((void**) &d_buffer, (EPOCH) * sizeof(double)); cudaMalloc((void**) &d_cost, (2 * r + 1) * size * sizeof(double)); cudaMalloc((void**) &d_cost_prev, (2 * r + 1) * size * sizeof(double)); cudaMalloc((void**) &d_bsf, size * sizeof(double)); cudaMalloc((void**) &d_t, 2 * m * size * sizeof(double)); cudaMalloc((void**) &d_tz, 2 * m * size * sizeof(double)); cudaMalloc((void**) &d_q, m * sizeof(double)); cudaMalloc((void**) &d_loc, size * sizeof(int)); ///Copying BSF array cudaMemcpy(d_bsf, h_bsf, m * sizeof(double), cudaMemcpyHostToDevice); ///Copy all the Query related arrays cudaMemcpy(d_q, h_q, m * sizeof(double), cudaMemcpyHostToDevice); bool done = false; bool last = false; int it = 0, ep = 0, k = 0; //begin = clock(); while (!done) { /// Read first m-1 points if (it == 0) { epoch = 100000; while (ep < epoch) { if (!fread(&d,sizeof(double),1,fp)){ //printf("%lf\n",d ); break; } h_buffer[ep] = d; ep++; } } /// Data are read in chunk of size EPOCH. 
/// When there is nothing to read, the loop is end. if (ep <= m - 1) { done = true; } else { if (last) { done = true; } //printf("Reading Done.\n"); sh ++; //begin = clock(); cudaMemcpy(d_buffer, h_buffer, ep * sizeof(double), cudaMemcpyHostToDevice); // to copy from CPU to GPU cudaDeviceSynchronize(); //end = clock(); //time_spent = (double) (end - begin) / CLOCKS_PER_SEC; //printf("Time taken by memcpy for reading buffer %lf ", time_spent); /// Just for printing a dot for approximate a million point. Not much accurate. // printf("Copying done.\n"); //Do everything here begin = clock(); processKernel<<<N, M,m*sizeof(double)>>>(d_q, d_buffer, d_cost, d_cost_prev, d_bsf, d_loc, d_t, d_tz, m, r, bsf, ep, EPOCH); cudaDeviceSynchronize(); end = clock(); time_spent = (double) (end - begin) / CLOCKS_PER_SEC; printf("Time taken by kernel %lf, ", time_spent); //Do the next set of buffering // printf("Kernel done.\n"); epoch = EPOCH; ep = 0; begin = clock(); while (ep < epoch) { if (!fread(&d,sizeof(double),1,fp)) { last = true; break; } h_buffer[ep] = d; ep++; } end = clock(); time_spent = (double) (end - begin) / CLOCKS_PER_SEC; printf("Time taken for reading %lf\n ", time_spent); //printf("Loading next set done\n"); //begin = clock(); cudaMemcpy(h_bsf, d_bsf, size * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(h_loc, d_loc, size * sizeof(int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // end = clock(); // time_spent = (double) (end - begin) / CLOCKS_PER_SEC; // printf("Time taken for memcpy %lf", time_spent); // printf("computation"); // begin = clock(); for (k = 0; k < size; k++) { if (bsf > h_bsf[k]) { bsf = h_bsf[k]; if (it == 0) { loc = (it) * (EPOCH) + h_loc[k] - m + 1; } else { loc = 100000 + (it - 1) * (EPOCH) + h_loc[k] - m + 1; } } } // end = clock(); // time_spent = (double) (end - begin) / CLOCKS_PER_SEC; // printf("Time taken for computation %lf \n", time_spent); // printf("Computation Done.\n"); /// If the size of last chunk is less then EPOCH, then no more data and terminate. } it++; } //end = clock(); //time_spent = (double) (end - begin) / CLOCKS_PER_SEC; //printf("\nTime taken %lf ", time_spent); fclose(fp); free(h_q); free(h_buffer); free(h_bsf); cudaFree(d_buffer); cudaFree(d_q); cudaFree(d_bsf); cudaFree(d_loc); cudaFree(d_cost); cudaFree(d_cost_prev); cudaFree(d_t); cudaFree(d_tz); //t2 = clock(); printf("\n"); /// Note that loc and i are long long. // cout << "Location : " << loc << endl; // cout << "Distance : " << sqrt(bsf) << endl; // cout << "Data Scanned : " << i << endl; // cout << "Total Execution Time : " << (t2-t1)/CLOCKS_PER_SEC << " sec" << endl; /// printf is just easier for formating ;) printf("Distance %lf\n", sqrt(bsf)); printf("Location %lld\n", loc); printf("No of iterations %d\n", sh); return 0; }
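For sanity-checking the device dtw() above, a host-side reference that uses the same Sakoe-Chiba band of half-width r and the same pair of rolling arrays of length 2*r+1 is convenient; cell (i, j) of the full cost matrix lands at index k = j - i + r. The sketch below is standalone validation code under that assumption, not part of the original source, and it omits the early-abandoning check.

// Host reference for the banded DTW kernel (squared point distance, band half-width r).
#include <vector>
#include <algorithm>

static double dtw_reference(const double *A, const double *B, int m, int r)
{
    const double INF_ = 1e10;
    std::vector<double> cost(2 * r + 1, INF_), cost_prev(2 * r + 1, INF_);
    int k = 0;
    for (int i = 0; i < m; i++) {
        k = std::max(0, r - i);
        for (int j = std::max(0, i - r); j <= std::min(m - 1, i + r); j++, k++) {
            double d = (A[i] - B[j]) * (A[i] - B[j]);
            if (i == 0 && j == 0) { cost[k] = d; continue; }
            double y = (j - 1 < 0 || k - 1 < 0)     ? INF_ : cost[k - 1];      // left
            double x = (i - 1 < 0 || k + 1 > 2 * r) ? INF_ : cost_prev[k + 1]; // up
            double z = (i - 1 < 0 || j - 1 < 0)     ? INF_ : cost_prev[k];     // diagonal
            cost[k] = std::min(std::min(x, y), z) + d;
        }
        std::swap(cost, cost_prev);           // current row becomes the previous row
    }
    return cost_prev[k - 1];                  // cell (m-1, m-1) after the final swap
}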
e0f062da1594e8ee117980bdaf5c54d67f29ecff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample demonstrates two adaptive image denoising technqiues: * KNN and NLM, based on computation of both geometric and color distance * between texels. While both techniques are already implemented in the * DirectX SDK using shaders, massively speeded up variation * of the latter techique, taking advantage of shared memory, is implemented * in addition to DirectX counterparts. * See supplied whitepaper for more explanations. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "imageBinarization.h" //////////////////////////////////////////////////////////////////////////////// // Helper functions //////////////////////////////////////////////////////////////////////////////// #define THRESHOLD 127 __device__ unsigned char __max(unsigned char x, unsigned char y) { return (x > y) ? x : y; } __device__ unsigned char __min(unsigned char x, unsigned char y) { return (x < y) ? x : y; } int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); } // __device__ float lerpf(float a, float b, float c) // { // return a + (b - a) * c; // } // __device__ float vecLen(float4 a, float4 b) // { // return ( // (b.x - a.x) * (b.x - a.x) + // (b.y - a.y) * (b.y - a.y) + // (b.z - a.z) * (b.z - a.z) // ); // } __device__ TColor make_color(unsigned char r, unsigned char g, unsigned char b, unsigned char a) { return ((int)(a * 255.0f) << 24) | ((int)(b * 255.0f) << 16) | ((int)(g * 255.0f) << 8) | ((int)(r * 255.0f) << 0); } //////////////////////////////////////////////////////////////////////////////// // Global data handlers and parameters //////////////////////////////////////////////////////////////////////////////// //Texture reference and channel descriptor for image texture //texture<uchar4, 2, hipReadModeNormalizedFloat> texImage; texture<uchar4, 2, hipReadModeElementType> texImage; hipChannelFormatDesc uchar4tex = hipCreateChannelDesc<uchar4>(); //CUDA array descriptor hipArray *a_Src; //////////////////////////////////////////////////////////////////////////////// // Filtering kernels //////////////////////////////////////////////////////////////////////////////// // #include "imageDenoising_copy_kernel.cuh" // #include "imageDenoising_nlm_kernel.cuh" // #include "imageDenoising_nlm2_kernel_hip.cuh" //----------------------------------------------------------------------------- /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ //The macro CUPRINTF is defined for architectures //with different compute capabilities. #if __CUDA_ARCH__ < 200 //Compute capability 1.x architectures #define CUPRINTF cuPrintf #else //Compute capability 2.x architectures #define CUPRINTF(fmt, ...) 
printf("[%d, %d]:\t" fmt, \ blockIdx.y*gridDim.x+blockIdx.x,\ threadIdx.z*blockDim.x*blockDim.y+threadIdx.y*blockDim.x+threadIdx.x,\ __VA_ARGS__) #endif //////////////////////////////////////////////////////////////////////////////// // image binarization kernel //////////////////////////////////////////////////////////////////////////////// __global__ void imageBinarization( unsigned char *dst, int imageW, int imageH ) { const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < imageW && iy < imageH) { uchar4 currPixel = tex2D(texImage, ix, iy); unsigned char b = (unsigned char) currPixel.z; unsigned char g = (unsigned char) currPixel.y; unsigned char r = (unsigned char) currPixel.x; unsigned char result; #if defined(orig_code) unsigned char mi = __min(r, __min(g, b)); unsigned char ma = __max(r, __max(g, b)); result = (((unsigned short) ma + (unsigned short) mi) > THRESHOLD * 2 ) ? 255 : 0; #endif #if defined(sw_npu) float parrotInput[3]; float parrotOutput[1]; parrotInput[0] = r / 255.0; parrotInput[1] = g / 255.0; parrotInput[2] = b / 255.0; float layer_1_0 = parrotInput[0] * 4.699794 + parrotInput[1] * 0.877188 + parrotInput[2] * 0.496408 + 1.0f * -3.661214; float layer_1_1 = parrotInput[0] * -3.402053 + parrotInput[1] * 3.861662 + parrotInput[2] * -6.814451 + 1.0f * 1.839221; float layer_1_2 = parrotInput[0] * -2.248324 + parrotInput[1] * -8.565042 + parrotInput[2] * 3.407546 + 1.0f * 1.899614; float layer_1_3 = parrotInput[0] * 4.647291 + parrotInput[1] * 0.815243 + parrotInput[2] * 0.595827 + 1.0f * -3.638438; float layer_2_0 = sigmoid(layer_1_0, 0.500000) * -10.469186 + sigmoid(layer_1_1, 0.500000) * 24.400442 + sigmoid(layer_1_2, 0.500000) * 24.699705 + sigmoid(layer_1_3, 0.500000) * -10.481432 + 1.0f * 0.666498; layer_2_0 = sigmoid(layer_2_0, 0.5); float layer_2_1 = sigmoid(layer_1_0, 0.500000) * -9.471014 + sigmoid(layer_1_1, 0.500000) * 19.837952 + sigmoid(layer_1_2, 0.500000) * 20.186312 + sigmoid(layer_1_3, 0.500000) * -9.508874 + 1.0f * 1.531181; layer_2_1 = sigmoid(layer_2_1, 0.5); float layer_3_0 = sigmoid(layer_2_0, 0.500000) * -10.481432 + sigmoid(layer_2_1, 0.000000) * 0.666498 + 1.0f * -3.288635; layer_3_0 = sigmoid(layer_3_0, 0.5); parrotOutput[0] = layer_3_0; if(parrotOutput[0] > 0.7) result = 255; else result = 0; #endif dst[imageW * iy + ix] = result; }; } extern "C" void cuda_imageBinarization( unsigned char *d_dst, int imageW, int imageH ) { //printf("cuda image binarization\n"); #pragma parrot.start("imageBinarization") dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y)); hipLaunchKernelGGL(( imageBinarization), dim3(grid), dim3(threads), 0, 0, d_dst, imageW, imageH); hipDeviceSynchronize(); #pragma parrot.end("imageBinarization") } extern "C" hipError_t CUDA_Bind2TextureArray() { return hipBindTextureToArray(texImage, a_Src); } extern "C" hipError_t CUDA_UnbindTexture() { return hipUnbindTexture(texImage); } extern "C" hipError_t CUDA_MallocArray(uchar4 **h_Src, int imageW, int imageH) { hipError_t error; error = hipMallocArray(&a_Src, &uchar4tex, imageW, imageH); error = hipMemcpyToArray(a_Src, 0, 0, *h_Src, imageW * imageH * sizeof(uchar4), hipMemcpyHostToDevice ); return error; } extern "C" hipError_t CUDA_FreeArray() { return hipFreeArray(a_Src); }
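The orig_code branch of the kernel classifies a pixel as white when the sum of its largest and smallest RGB channels exceeds 2*THRESHOLD; the sw_npu branch approximates the same decision with a small fixed-weight network whose sigmoid helper is assumed to come from imageBinarization.h. A host-side reference of the thresholding rule, handy for validating either path (standalone sketch, not code from the sample):

// CPU reference for the orig_code binarization rule.
#include <algorithm>

static unsigned char binarize_reference(unsigned char r, unsigned char g, unsigned char b)
{
    const int THRESHOLD = 127;   // mirrors the #define in the file
    unsigned char mi = std::min(r, std::min(g, b));
    unsigned char ma = std::max(r, std::max(g, b));
    return ((unsigned short)ma + (unsigned short)mi > THRESHOLD * 2) ? 255 : 0;
}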
e0f062da1594e8ee117980bdaf5c54d67f29ecff.cu
/* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample demonstrates two adaptive image denoising technqiues: * KNN and NLM, based on computation of both geometric and color distance * between texels. While both techniques are already implemented in the * DirectX SDK using shaders, massively speeded up variation * of the latter techique, taking advantage of shared memory, is implemented * in addition to DirectX counterparts. * See supplied whitepaper for more explanations. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "imageBinarization.h" //////////////////////////////////////////////////////////////////////////////// // Helper functions //////////////////////////////////////////////////////////////////////////////// #define THRESHOLD 127 __device__ unsigned char __max(unsigned char x, unsigned char y) { return (x > y) ? x : y; } __device__ unsigned char __min(unsigned char x, unsigned char y) { return (x < y) ? x : y; } int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); } // __device__ float lerpf(float a, float b, float c) // { // return a + (b - a) * c; // } // __device__ float vecLen(float4 a, float4 b) // { // return ( // (b.x - a.x) * (b.x - a.x) + // (b.y - a.y) * (b.y - a.y) + // (b.z - a.z) * (b.z - a.z) // ); // } __device__ TColor make_color(unsigned char r, unsigned char g, unsigned char b, unsigned char a) { return ((int)(a * 255.0f) << 24) | ((int)(b * 255.0f) << 16) | ((int)(g * 255.0f) << 8) | ((int)(r * 255.0f) << 0); } //////////////////////////////////////////////////////////////////////////////// // Global data handlers and parameters //////////////////////////////////////////////////////////////////////////////// //Texture reference and channel descriptor for image texture //texture<uchar4, 2, cudaReadModeNormalizedFloat> texImage; texture<uchar4, 2, cudaReadModeElementType> texImage; cudaChannelFormatDesc uchar4tex = cudaCreateChannelDesc<uchar4>(); //CUDA array descriptor cudaArray *a_Src; //////////////////////////////////////////////////////////////////////////////// // Filtering kernels //////////////////////////////////////////////////////////////////////////////// // #include "imageDenoising_copy_kernel.cuh" // #include "imageDenoising_nlm_kernel.cuh" // #include "imageDenoising_nlm2_kernel.cuh" //----------------------------------------------------------------------------- /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ //The macro CUPRINTF is defined for architectures //with different compute capabilities. #if __CUDA_ARCH__ < 200 //Compute capability 1.x architectures #define CUPRINTF cuPrintf #else //Compute capability 2.x architectures #define CUPRINTF(fmt, ...) 
printf("[%d, %d]:\t" fmt, \ blockIdx.y*gridDim.x+blockIdx.x,\ threadIdx.z*blockDim.x*blockDim.y+threadIdx.y*blockDim.x+threadIdx.x,\ __VA_ARGS__) #endif //////////////////////////////////////////////////////////////////////////////// // image binarization kernel //////////////////////////////////////////////////////////////////////////////// __global__ void imageBinarization( unsigned char *dst, int imageW, int imageH ) { const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < imageW && iy < imageH) { uchar4 currPixel = tex2D(texImage, ix, iy); unsigned char b = (unsigned char) currPixel.z; unsigned char g = (unsigned char) currPixel.y; unsigned char r = (unsigned char) currPixel.x; unsigned char result; #if defined(orig_code) unsigned char mi = __min(r, __min(g, b)); unsigned char ma = __max(r, __max(g, b)); result = (((unsigned short) ma + (unsigned short) mi) > THRESHOLD * 2 ) ? 255 : 0; #endif #if defined(sw_npu) float parrotInput[3]; float parrotOutput[1]; parrotInput[0] = r / 255.0; parrotInput[1] = g / 255.0; parrotInput[2] = b / 255.0; float layer_1_0 = parrotInput[0] * 4.699794 + parrotInput[1] * 0.877188 + parrotInput[2] * 0.496408 + 1.0f * -3.661214; float layer_1_1 = parrotInput[0] * -3.402053 + parrotInput[1] * 3.861662 + parrotInput[2] * -6.814451 + 1.0f * 1.839221; float layer_1_2 = parrotInput[0] * -2.248324 + parrotInput[1] * -8.565042 + parrotInput[2] * 3.407546 + 1.0f * 1.899614; float layer_1_3 = parrotInput[0] * 4.647291 + parrotInput[1] * 0.815243 + parrotInput[2] * 0.595827 + 1.0f * -3.638438; float layer_2_0 = sigmoid(layer_1_0, 0.500000) * -10.469186 + sigmoid(layer_1_1, 0.500000) * 24.400442 + sigmoid(layer_1_2, 0.500000) * 24.699705 + sigmoid(layer_1_3, 0.500000) * -10.481432 + 1.0f * 0.666498; layer_2_0 = sigmoid(layer_2_0, 0.5); float layer_2_1 = sigmoid(layer_1_0, 0.500000) * -9.471014 + sigmoid(layer_1_1, 0.500000) * 19.837952 + sigmoid(layer_1_2, 0.500000) * 20.186312 + sigmoid(layer_1_3, 0.500000) * -9.508874 + 1.0f * 1.531181; layer_2_1 = sigmoid(layer_2_1, 0.5); float layer_3_0 = sigmoid(layer_2_0, 0.500000) * -10.481432 + sigmoid(layer_2_1, 0.000000) * 0.666498 + 1.0f * -3.288635; layer_3_0 = sigmoid(layer_3_0, 0.5); parrotOutput[0] = layer_3_0; if(parrotOutput[0] > 0.7) result = 255; else result = 0; #endif dst[imageW * iy + ix] = result; }; } extern "C" void cuda_imageBinarization( unsigned char *d_dst, int imageW, int imageH ) { //printf("cuda image binarization\n"); #pragma parrot.start("imageBinarization") dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y)); imageBinarization<<<grid, threads>>>(d_dst, imageW, imageH); cudaDeviceSynchronize(); #pragma parrot.end("imageBinarization") } extern "C" cudaError_t CUDA_Bind2TextureArray() { return cudaBindTextureToArray(texImage, a_Src); } extern "C" cudaError_t CUDA_UnbindTexture() { return cudaUnbindTexture(texImage); } extern "C" cudaError_t CUDA_MallocArray(uchar4 **h_Src, int imageW, int imageH) { cudaError_t error; error = cudaMallocArray(&a_Src, &uchar4tex, imageW, imageH); error = cudaMemcpyToArray(a_Src, 0, 0, *h_Src, imageW * imageH * sizeof(uchar4), cudaMemcpyHostToDevice ); return error; } extern "C" cudaError_t CUDA_FreeArray() { return cudaFreeArray(a_Src); }
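Both versions of this file use the legacy texture-reference API (texture<uchar4, 2, cudaReadModeElementType> plus cudaBindTextureToArray), which is deprecated and removed in recent CUDA toolkits. The sketch below shows the roughly equivalent setup with a texture object; the helper name and descriptor settings are illustrative assumptions, not code from the sample, and the kernel would then take the object as a parameter and fetch with tex2D<uchar4>(tex, ix, iy).

// Texture-object replacement for the texture reference used above (sketch).
#include <cuda_runtime.h>

cudaTextureObject_t makeTexObject(cudaArray *a_Src)
{
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = a_Src;

    cudaTextureDesc texDesc = {};
    texDesc.readMode = cudaReadModeElementType;   // matches the reference declaration
    texDesc.filterMode = cudaFilterModePoint;
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.normalizedCoords = 0;

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr);
    return tex;
}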
8625ddecd7164231626aff349b29616568912349.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_plus_4_back; int xdim0_update_halo_kernel3_plus_4_back_h = -1; __constant__ int ydim0_update_halo_kernel3_plus_4_back; int ydim0_update_halo_kernel3_plus_4_back_h = -1; __constant__ int xdim1_update_halo_kernel3_plus_4_back; int xdim1_update_halo_kernel3_plus_4_back_h = -1; __constant__ int ydim1_update_halo_kernel3_plus_4_back; int ydim1_update_halo_kernel3_plus_4_back_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel3_plus_4_back * (y) + \ xdim0_update_halo_kernel3_plus_4_back * \ ydim0_update_halo_kernel3_plus_4_back * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel3_plus_4_back * (y) + \ xdim1_update_halo_kernel3_plus_4_back * \ ydim1_update_halo_kernel3_plus_4_back * (z)) // user function __device__ inline void update_halo_kernel3_plus_4_back_gpu(double *vol_flux_x, double *mass_flux_x, const int *fields) { if (fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0, 0, 0)] = vol_flux_x[OPS_ACC0(0, 0, 4)]; if (fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0, 0, 0)] = mass_flux_x[OPS_ACC1(0, 0, 4)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_plus_4_back(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_plus_4_back + idx_z * 1 * 1 * xdim0_update_halo_kernel3_plus_4_back * ydim0_update_halo_kernel3_plus_4_back; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_plus_4_back + idx_z * 1 * 1 * xdim1_update_halo_kernel3_plus_4_back * ydim1_update_halo_kernel3_plus_4_back; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_plus_4_back_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel3_plus_4_back(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 112)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(112, "update_halo_kernel3_plus_4_back"); OPS_kernels[112].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = 
args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel3_plus_4_back_h || ydim0 != ydim0_update_halo_kernel3_plus_4_back_h || xdim1 != xdim1_update_halo_kernel3_plus_4_back_h || ydim1 != ydim1_update_halo_kernel3_plus_4_back_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel3_plus_4_back, &xdim0, sizeof(int)); xdim0_update_halo_kernel3_plus_4_back_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel3_plus_4_back, &ydim0, sizeof(int)); ydim0_update_halo_kernel3_plus_4_back_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel3_plus_4_back, &xdim1, sizeof(int)); xdim1_update_halo_kernel3_plus_4_back_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel3_plus_4_back, &ydim1, sizeof(int)); ydim1_update_halo_kernel3_plus_4_back_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[112].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel3_plus_4_back), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[112].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[112].mpi_time += t2 - t1; OPS_kernels[112].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[112].transfer += ops_compute_transfer(dim, 
start, end, &arg1); } }
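The host stub above keeps the dat extents in __constant__ memory and shadows them in host-side globals (the *_h variables) so that a hipMemcpyToSymbol is issued only when a dimension has actually changed since the previous call. The same caching pattern in isolation, written with generic names and the CUDA runtime call rather than the generated identifiers:

// Re-upload a __constant__ extent only when it changes between kernel launches (sketch).
#include <cuda_runtime.h>

__constant__ int c_xdim;
static int c_xdim_h = -1;          // host shadow of the value currently on the device

static void update_xdim(int xdim)
{
    if (xdim != c_xdim_h) {
        cudaMemcpyToSymbol(c_xdim, &xdim, sizeof(int));
        c_xdim_h = xdim;
    }
}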
8625ddecd7164231626aff349b29616568912349.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_plus_4_back; int xdim0_update_halo_kernel3_plus_4_back_h = -1; __constant__ int ydim0_update_halo_kernel3_plus_4_back; int ydim0_update_halo_kernel3_plus_4_back_h = -1; __constant__ int xdim1_update_halo_kernel3_plus_4_back; int xdim1_update_halo_kernel3_plus_4_back_h = -1; __constant__ int ydim1_update_halo_kernel3_plus_4_back; int ydim1_update_halo_kernel3_plus_4_back_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel3_plus_4_back * (y) + \ xdim0_update_halo_kernel3_plus_4_back * \ ydim0_update_halo_kernel3_plus_4_back * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel3_plus_4_back * (y) + \ xdim1_update_halo_kernel3_plus_4_back * \ ydim1_update_halo_kernel3_plus_4_back * (z)) // user function __device__ inline void update_halo_kernel3_plus_4_back_gpu(double *vol_flux_x, double *mass_flux_x, const int *fields) { if (fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0, 0, 0)] = vol_flux_x[OPS_ACC0(0, 0, 4)]; if (fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0, 0, 0)] = mass_flux_x[OPS_ACC1(0, 0, 4)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_plus_4_back(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_plus_4_back + idx_z * 1 * 1 * xdim0_update_halo_kernel3_plus_4_back * ydim0_update_halo_kernel3_plus_4_back; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_plus_4_back + idx_z * 1 * 1 * xdim1_update_halo_kernel3_plus_4_back * ydim1_update_halo_kernel3_plus_4_back; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_plus_4_back_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel3_plus_4_back(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 112)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(112, "update_halo_kernel3_plus_4_back"); OPS_kernels[112].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if 
(xdim0 != xdim0_update_halo_kernel3_plus_4_back_h || ydim0 != ydim0_update_halo_kernel3_plus_4_back_h || xdim1 != xdim1_update_halo_kernel3_plus_4_back_h || ydim1 != ydim1_update_halo_kernel3_plus_4_back_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel3_plus_4_back, &xdim0, sizeof(int)); xdim0_update_halo_kernel3_plus_4_back_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel3_plus_4_back, &ydim0, sizeof(int)); ydim0_update_halo_kernel3_plus_4_back_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel3_plus_4_back, &xdim1, sizeof(int)); xdim1_update_halo_kernel3_plus_4_back_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel3_plus_4_back, &ydim1, sizeof(int)); ydim1_update_halo_kernel3_plus_4_back_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[112].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel3_plus_4_back<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[112].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[112].mpi_time += t2 - t1; OPS_kernels[112].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[112].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
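The OPS_ACC0/OPS_ACC1 macros flatten a relative (x, y, z) stencil offset into a 1D index using the row pitch xdim and the slice pitch xdim*ydim held in __constant__ memory. Written out as a plain helper for clarity (illustrative only; the generated code keeps the macro form):

// index = x + xdim*y + xdim*ydim*z, applied after the per-thread base offset.
__device__ __forceinline__ int acc3d(int x, int y, int z, int xdim, int ydim)
{
    return x + xdim * y + xdim * ydim * z;
}
// e.g. vol_flux_x[acc3d(0, 0, 0, xdim0, ydim0)] = vol_flux_x[acc3d(0, 0, 4, xdim0, ydim0)];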
89700345d9eca2e712d3a301a5bd8a628eff8fe4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

void helloCPU()
{
    printf("Hello from the CPU.\n");
}

__global__ void helloGPU()
{
    printf("Hello from the GPU!\n");
}

int main()
{
    helloCPU();
    hipLaunchKernelGGL(helloGPU, dim3(1), dim3(1), 0, 0);
    hipDeviceSynchronize();
}
89700345d9eca2e712d3a301a5bd8a628eff8fe4.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

void helloCPU()
{
    printf("Hello from the CPU.\n");
}

__global__ void helloGPU()
{
    printf("Hello from the GPU!\n");
}

int main()
{
    helloCPU();
    helloGPU<<<1, 1>>>();
    cudaDeviceSynchronize();
}
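This pair is the smallest example of what hipify changes: the CUDA triple-chevron launch becomes hipLaunchKernelGGL with explicit grid, block, dynamic-shared-memory, and stream arguments, and the runtime header and API prefixes are swapped, while the rest of the source is untouched. A sketch of the same mapping for a kernel that takes arguments; the kernel and its parameters below are made up for illustration.

// Hypothetical kernel used only to show the launch-syntax mapping.
__global__ void axpy(float a, const float *x, float *y, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}

// CUDA:  axpy<<<grid, block, 0, stream>>>(a, d_x, d_y, n);
// HIP:   hipLaunchKernelGGL(axpy, grid, block, 0, stream, a, d_x, d_y, n);
//
// In both APIs the launch is asynchronous; check it with cudaGetLastError() /
// hipGetLastError() and synchronize before reading results (or before exiting,
// which is also what flushes device-side printf in the hello-world example above).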
3a5c2d0075e4db9c0815585cf9887c3cd4be5302.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "mex.h" #include "stdio.h" #include <string.h> #include "rocblas.h" #include <math.h> #pragma comment(lib,"cublas.lib") #define blocksize 32 #define THREAD_NUM 256 #define BLOCK_NUM 512 #define eps1 1.0e-30 #define eps 1.0e2 __global__ void Active(float *output_x,float *output_a1,float *b1,float *ft1,float *it1,float *cct1,float *ot1,int sum,int n_a) { const int tid=threadIdx.x; const int bid=blockIdx.x; int p,q,n_a4=4*n_a; for(int u=tid+bid*THREAD_NUM;u<sum;u+=BLOCK_NUM*THREAD_NUM) { p=u/n_a4; q=u%n_a4; if(q<n_a) ft1[q+p*n_a]=1/(1+exp(-(output_x[u]+output_a1[u]+b1[q]))); else if(q>=n_a&&q<(2*n_a)) it1[q-n_a+p*n_a]=1/(1+exp(-(output_x[u]+output_a1[u]+b1[q]))); else if(q>=(2*n_a)&&q<(3*n_a)) cct1[q-2*n_a+p*n_a]=2/(1+exp(-2*(output_x[u]+output_a1[u]+b1[q])))-1; else ot1[q-3*n_a+p*n_a]=1/(1+exp(-(output_x[u]+output_a1[u]+b1[q]))); } } __global__ void pointwise(float *ft1,float *it1,float *cct1,float *ot1,float *a_next1,float *c_next1,float *c_prev1,int sum) { const int tid=threadIdx.x; const int bid=blockIdx.x; for(int u=tid+bid*THREAD_NUM;u<sum;u+=BLOCK_NUM*THREAD_NUM) { c_next1[u]=ft1[u]*c_prev1[u]+it1[u]*cct1[u]; a_next1[u]=ot1[u]*(2/(1+exp(-2*c_next1[u]))-1); } } __global__ void Add(float *a,float *by,int n_y,int m) { const int tid=threadIdx.x; const int bid=blockIdx.x; for(int u=tid+bid*THREAD_NUM;u<n_y*m;u+=BLOCK_NUM*THREAD_NUM) a[u]=exp(a[u]+by[u%n_y]); } __global__ void sum(float *a,float *b,int n_y,int m) { const int tid=threadIdx.x; const int bid=blockIdx.x; int offset=1,mask=1; __shared__ float shared[THREAD_NUM]; shared[tid]=0; for(int u=tid+bid*n_y;u<n_y*(bid+1);u+= 1) { shared[tid]+=a[u]; } while(offset<THREAD_NUM) { if (tid&mask == 0) { shared[tid] += shared[tid + offset]; } offset += offset; mask=offset+mask; __syncthreads(); } if(tid==0) { b[bid]=shared[0]; } } __global__ void out(float *a,float *b,float *y_pred,float *output_diff,float *y_t,float *error,int sum,int n_y) { const int tid=threadIdx.x; const int bid=blockIdx.x; int r,p; for(int u=tid+bid*THREAD_NUM;u<sum;u+=BLOCK_NUM*THREAD_NUM) { r=u/n_y; p=y_t[r]-1; y_pred[u]=a[u]/b[r]; if((u%n_y)==p) { output_diff[u]=1-y_pred[u]; error[r]+=-log(y_pred[u]); } else output_diff[u]=-y_pred[u]; } } double add(float *a,int m,int T_x) { double error=0; for(int i=0;i<m;i++) error=error+a[i]; return error/(m*T_x); } __global__ void Da(float *da_next2,float *da,int sum) { const int tid=threadIdx.x; const int bid=blockIdx.x; for(int u=tid+bid*THREAD_NUM;u<sum;u+=BLOCK_NUM*THREAD_NUM) { da_next2[u]=da_next2[u]+da[u]; } } __global__ void Dc(float *dc_next2,float *dc_prev2,float *da_next2,float *c_next2,float *ft2,float *ot2,int sum) { const int tid=threadIdx.x; const int bid=blockIdx.x; float r; for(int u=tid+bid*THREAD_NUM;u<sum;u+=BLOCK_NUM*THREAD_NUM) { r=2/(1+exp(-2*c_next2[u]))-1; dc_next2[u]=dc_next2[u]+ot2[u]*(1-r*r)*da_next2[u]; dc_prev2[u]=dc_next2[u]*ft2[u]; } } __global__ void d_door(float *door,float *da_next2,float *dc_next2,float *c_next2,float *c_prev2,float *ot2,float *it2,float *cct2,float *ft2,int sum,int n_a) { const int tid=threadIdx.x; const int bid=blockIdx.x; int p,q,n_a4=4*n_a,r; for(int u=tid+bid*THREAD_NUM;u<sum;u+=BLOCK_NUM*THREAD_NUM) { p=u/n_a4; q=u%n_a4; if(q<n_a) { r=p*n_a+q; door[q+p*n_a4]=dc_next2[r]*c_prev2[r]*ft2[r]*(1-ft2[r]); } else if(q>=n_a&&q<(2*n_a)) { r=p*n_a+q-n_a; door[q+p*n_a4]=dc_next2[r]*cct2[r]*it2[r]*(1-it2[r]); } else if(q>=(2*n_a)&&q<(3*n_a)) { r=p*n_a+q-2*n_a; 
door[q+p*n_a4]=dc_next2[r]*it2[r]*(1-cct2[r]*cct2[r]); } else { r=p*n_a+q-3*n_a; door[q+p*n_a4]=da_next2[r]*(2/(1+exp(-2*c_next2[r]))-1)*ot2[r]*(1-ot2[r]); } } } __global__ void d_bias(float *door,float *d_b2,int a,int b) { const int bid=blockIdx.x; for(int u=bid;u<a;u+=BLOCK_NUM) { for(int i=0;i<b;i++) { d_b2[u]=d_b2[u]+door[a*i+u]; } } } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { //[gradients,Allerror]=LSTM(train_x{num,1},train_y{num,1},parameters); const size_t *dim_array = mxGetDimensions(prhs[0]); int n_x=*dim_array,m=*(dim_array+1),T_x=*(dim_array+2); int n_a1=256,n_a2=256,n_y=5; size_t size_x=n_x*m*T_x*sizeof(float); size_t size_y=m*T_x*sizeof(float); size_t layer_1=n_a1*m*sizeof(float); size_t layer_2=n_a2*m*sizeof(float); // float *x_batch=(float*)mxGetPr(prhs[0]),*y_batch=(float*)mxGetPr(prhs[1]); float *host_w1_x=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],0))); float *host_w1_a1=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],1))); float *host_b1=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],2))); float *host_w2_a1=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],3))); float *host_w2_a2=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],4))); float *host_b2=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],5))); float *host_wy=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],6))); float *host_by=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],7))); // float *a1,*c1,*a2,*c2; hipMalloc((void**)&a1,layer_1*(T_x+1)); hipMalloc((void**)&c1,layer_1*(T_x+1)); hipMalloc((void**)&a2,layer_2*(T_x+1)); hipMalloc((void**)&c2,layer_2*(T_x+1)); hipMemset(a1,0,layer_1*(T_x+1)); hipMemset(c1,0,layer_1*(T_x+1)); hipMemset(a2,0,layer_2*(T_x+1)); hipMemset(c2,0,layer_2*(T_x+1)); //x,y,wGPU float *x_t,*y_t; hipMalloc((void**)&x_t,size_x); hipMalloc((void**)&y_t,size_y); hipMemcpy(x_t,x_batch,size_x,hipMemcpyHostToDevice); hipMemcpy(y_t,y_batch,size_y,hipMemcpyHostToDevice); float *w1_x,*w1_a1,*b1,*w2_a1,*w2_a2,*b2,*wy,*by; hipMalloc((void**)&w1_x,4*n_x*n_a1*sizeof(float)); hipMalloc((void**)&w1_a1,4*n_a1*n_a1*sizeof(float)); hipMalloc((void**)&b1,4*n_a1*sizeof(float)); hipMalloc((void**)&w2_a1,4*n_a1*n_a2*sizeof(float)); hipMalloc((void**)&w2_a2,4*n_a2*n_a2*sizeof(float)); hipMalloc((void**)&b2,4*n_a2*sizeof(float)); hipMalloc((void**)&wy,n_y*n_a2*sizeof(float)); hipMalloc((void**)&by,n_y*sizeof(float)); hipMemcpy(w1_x,host_w1_x,4*n_x*n_a1*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(w1_a1,host_w1_a1,4*n_a1*n_a1*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(b1,host_b1,4*n_a1*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(w2_a1,host_w2_a1,4*n_a1*n_a2*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(w2_a2,host_w2_a2,4*n_a2*n_a2*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(b2,host_b2,4*n_a2*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(wy,host_wy,n_y*n_a2*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(by,host_by,n_y*sizeof(float),hipMemcpyHostToDevice); float *output_1,*output_2,*output_3,*output_4,*output_5,*output_6; hipMalloc((void**)&output_1,4*n_a1*m*sizeof(float)); hipMalloc((void**)&output_2,4*n_a1*m*sizeof(float)); hipMalloc((void**)&output_3,4*n_a2*m*sizeof(float)); hipMalloc((void**)&output_4,4*n_a2*m*sizeof(float)); hipMalloc((void**)&output_5,n_y*m*sizeof(float)); hipMalloc((void**)&output_6,m*sizeof(float)); hipMemset(output_6,0,m*sizeof(float)); float 
*ft1,*it1,*cct1,*ot1,*ft2,*it2,*cct2,*ot2; hipMalloc((void**)&ft1,n_a1*m*sizeof(float)*T_x); hipMalloc((void**)&it1,n_a1*m*sizeof(float)*T_x); hipMalloc((void**)&cct1,n_a1*m*sizeof(float)*T_x); hipMalloc((void**)&ot1,n_a1*m*sizeof(float)*T_x); hipMalloc((void**)&ft2,n_a2*m*sizeof(float)*T_x); hipMalloc((void**)&it2,n_a2*m*sizeof(float)*T_x); hipMalloc((void**)&cct2,n_a2*m*sizeof(float)*T_x); hipMalloc((void**)&ot2,n_a2*m*sizeof(float)*T_x); float *y_pred,*output_diff,*da,*error,*error_cpu=(float*)malloc(m*sizeof(float)); hipMalloc((void**)&y_pred,n_y*m*sizeof(float)*T_x); hipMalloc((void**)&output_diff,n_y*m*sizeof(float)*T_x); hipMalloc((void**)&da,n_a2*m*sizeof(float)*T_x); hipMalloc((void**)&error,m*sizeof(float)); hipMemset(error,0,m*sizeof(float)); float alpha=1,beta=0,beta1=1; hipblasHandle_t handle; hipblasCreate(&handle); dim3 dimBlock(blocksize, blocksize); for(int t=1;t<=T_x;t++){ hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,4*n_a1,m,n_x,&alpha,w1_x, 4*n_a1, x_t+(t-1)*n_x*m,n_x,&beta,output_1,4*n_a1); hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,4*n_a1,m,n_a1,&alpha,w1_a1, 4*n_a1, a1+(t-1)*n_a1*m,n_a1,&beta,output_2,4*n_a1); Active<< <BLOCK_NUM,THREAD_NUM>> >(output_1,output_2,b1,ft1+(t-1)*n_a1*m,it1+(t-1)*n_a1*m,cct1+(t-1)*n_a1*m,ot1+(t-1)*n_a1*m,4*n_a1*m,n_a1); pointwise<< <BLOCK_NUM,THREAD_NUM>> >(ft1+(t-1)*n_a1*m,it1+(t-1)*n_a1*m,cct1+(t-1)*n_a1*m,ot1+(t-1)*n_a1*m,a1+t*n_a1*m,c1+t*n_a1*m,c1+(t-1)*n_a1*m,n_a1*m); hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,4*n_a2,m,n_a1,&alpha,w2_a1, 4*n_a2, a1+t*n_a1*m,n_a1,&beta,output_3,4*n_a2); hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,4*n_a2,m,n_a2,&alpha,w2_a2, 4*n_a2, a2+(t-1)*n_a2*m,n_a2,&beta,output_4,4*n_a2); Active<< <BLOCK_NUM,THREAD_NUM>> >(output_3,output_4,b2,ft2+(t-1)*n_a2*m,it2+(t-1)*n_a2*m,cct2+(t-1)*n_a2*m,ot2+(t-1)*n_a2*m,4*n_a2*m,n_a2); pointwise<< <BLOCK_NUM,THREAD_NUM>> >(ft2+(t-1)*n_a2*m,it2+(t-1)*n_a2*m,cct2+(t-1)*n_a2*m,ot2+(t-1)*n_a2*m,a2+t*n_a2*m,c2+t*n_a2*m,c2+(t-1)*n_a2*m,n_a2*m); hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,n_y,m,n_a2,&alpha,wy, n_y, a2+t*n_a2*m,n_a2,&beta,output_5,n_y); Add<< <BLOCK_NUM,THREAD_NUM>> >(output_5,by,n_y,m); sum<< <m,THREAD_NUM>> >(output_5,output_6,n_y,m); out<< <BLOCK_NUM,THREAD_NUM>> >(output_5,output_6,y_pred+(t-1)*n_y*m,output_diff+(t-1)*n_y*m,y_t+(t-1)*m,error,n_y*m,n_y); hipblasSgemm(handle,HIPBLAS_OP_T,HIPBLAS_OP_N,n_a2,m,n_y,&alpha,wy, n_y, output_diff+(t-1)*n_y*m,n_y,&beta,da+(t-1)*n_a2*m,n_a2); } float *d_w1_x,*d_w1_a1,*d_b1,*d_w2_a1,*d_w2_a2,*d_b2,*d_wy,*d_by; hipMalloc((void**)&d_w1_x,4*n_x*n_a1*sizeof(float)); hipMalloc((void**)&d_w1_a1,4*n_a1*n_a1*sizeof(float)); hipMalloc((void**)&d_b1,4*n_a1*sizeof(float)); hipMalloc((void**)&d_w2_a1,4*n_a1*n_a2*sizeof(float)); hipMalloc((void**)&d_w2_a2,4*n_a2*n_a2*sizeof(float)); hipMalloc((void**)&d_b2,4*n_a2*sizeof(float)); hipMalloc((void**)&d_wy,n_y*n_a2*sizeof(float)); hipMalloc((void**)&d_by,n_y*sizeof(float)); hipMemset(d_w1_x,0,4*n_x*n_a1*sizeof(float)); hipMemset(d_w1_a1,0,4*n_a1*n_a1*sizeof(float)); hipMemset(d_b1,0,4*n_a1*sizeof(float)); hipMemset(d_w2_a1,0,4*n_a1*n_a2*sizeof(float)); hipMemset(d_w2_a2,0,4*n_a2*n_a2*sizeof(float)); hipMemset(d_b2,0,4*n_a2*sizeof(float)); hipMemset(d_wy,0,n_y*n_a2*sizeof(float)); hipMemset(d_by,0,n_y*sizeof(float)); float *d_a1,*d_c1,*d_a2,*d_c2; hipMalloc((void**)&d_a1,layer_1*(T_x+1)); hipMalloc((void**)&d_c1,layer_1*(T_x+1)); hipMalloc((void**)&d_a2,layer_2*(T_x+1)); hipMalloc((void**)&d_c2,layer_2*(T_x+1)); hipMemset(d_a1,0,layer_1*(T_x+1)); 
hipMemset(d_c1,0,layer_1*(T_x+1)); hipMemset(d_a2,0,layer_2*(T_x+1)); hipMemset(d_c2,0,layer_2*(T_x+1)); float *door2,*door1; hipMalloc((void**)&door2,4*n_a2*m*sizeof(float)); hipMalloc((void**)&door1,4*n_a1*m*sizeof(float)); for(int t=T_x;t>=1;t--){ Da<< <BLOCK_NUM,THREAD_NUM>> >(d_a2+t*n_a2*m,da+(t-1)*n_a2*m,n_a2*m); Dc<< <BLOCK_NUM,THREAD_NUM>> >(d_c2+t*n_a2*m,d_c2+(t-1)*n_a2*m,d_a2+t*n_a2*m,c2+t*n_a2*m,ft2+(t-1)*n_a2*m,ot2+(t-1)*n_a2*m,n_a2*m); d_door<< <BLOCK_NUM,THREAD_NUM>> >(door2,d_a2+t*n_a2*m,d_c2+t*n_a2*m,c2+t*n_a2*m,c2+(t-1)*n_a2*m,ot2+(t-1)*n_a2*m,it2+(t-1)*n_a2*m,cct2+(t-1)*n_a2*m,ft2+(t-1)*n_a2*m,4*n_a2*m,n_a2); hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_T,4*n_a2,n_a2,m,&alpha,door2, 4*n_a2, a2+(t-1)*n_a2*m,n_a2,&beta1,d_w2_a2,4*n_a2); hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_T,4*n_a2,n_a1,m,&alpha,door2, 4*n_a2, a1+t*n_a1*m,n_a1,&beta1,d_w2_a1,4*n_a2); d_bias<< <BLOCK_NUM,1>> >(door2,d_b2,4*n_a2,m); hipblasSgemm(handle,HIPBLAS_OP_T,HIPBLAS_OP_N,n_a2,m,4*n_a2,&alpha,w2_a2, 4*n_a2,door2 ,4*n_a2,&beta,d_a2+(t-1)*n_a2*m,n_a2); hipblasSgemm(handle,HIPBLAS_OP_T,HIPBLAS_OP_N,n_a1,m,4*n_a2,&alpha,w2_a1, 4*n_a2,door2, 4*n_a2,&beta1,d_a1+t*n_a1*m,n_a1); Dc<< <BLOCK_NUM,THREAD_NUM>> >(d_c1+t*n_a1*m,d_c1+(t-1)*n_a1*m,d_a1+t*n_a1*m,c1+t*n_a1*m,ft1+(t-1)*n_a1*m,ot1+(t-1)*n_a1*m,n_a1*m); d_door<< <BLOCK_NUM,THREAD_NUM>> >(door1,d_a1+t*n_a1*m,d_c1+t*n_a1*m,c1+t*n_a1*m,c1+(t-1)*n_a1*m,ot1+(t-1)*n_a1*m,it1+(t-1)*n_a1*m,cct1+(t-1)*n_a1*m,ft1+(t-1)*n_a1*m,4*n_a1*m,n_a1); hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_T,4*n_a1,n_a1,m,&alpha,door1, 4*n_a1, a1+(t-1)*n_a1*m,n_a1,&beta1,d_w1_a1,4*n_a1); hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_T,4*n_a1,n_x,m,&alpha,door1, 4*n_a1, x_t+(t-1)*n_x*m,n_x,&beta1,d_w1_x,4*n_a1); d_bias<< <BLOCK_NUM,1>> >(door1,d_b1,4*n_a1,m); hipblasSgemm(handle,HIPBLAS_OP_T,HIPBLAS_OP_N,n_a1,m,4*n_a1,&alpha,w1_a1, 4*n_a1,door1 ,4*n_a1,&beta,d_a1+(t-1)*n_a1*m,n_a1); hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_T,n_y,n_a2,m,&alpha,output_diff+(t-1)*n_y*m, n_y,a2+t*n_a2*m,n_a2,&beta1,d_wy,n_y); d_bias<< <BLOCK_NUM,1>> >(output_diff+(t-1)*n_y*m,d_by,n_y,m); } /* const size_t dim[]={1,m}; plhs[2] = mxCreateNumericArray(2,dim ,mxSINGLE_CLASS, mxREAL); hipMemcpy((float*)mxGetPr(plhs[2]),output_6,m*sizeof(float), hipMemcpyDeviceToHost); */ hipMemcpy(error_cpu,error,m*sizeof(float),hipMemcpyDeviceToHost); double *Allerror; plhs[1] = mxCreateDoubleMatrix(1,1,mxREAL); Allerror = mxGetPr(plhs[1]); *Allerror=add(error_cpu,m,T_x); hipFree(a1); hipFree(c1); hipFree(a2); hipFree(c2); hipFree(x_t); hipFree(y_t); hipFree(y_pred); hipFree(output_diff); hipFree(error); free(error_cpu); hipFree(w1_x); hipFree(w1_a1); hipFree(b1); hipFree(w2_a1); hipFree(w2_a2); hipFree(b2); hipFree(wy); hipFree(by); hipFree(output_1); hipFree(output_2); hipFree(output_3); hipFree(output_4); hipFree(output_5); hipFree(output_6); hipFree(ft1); hipFree(it1); hipFree(cct1); hipFree(ot1); hipFree(ft2); hipFree(it2); hipFree(cct2); hipFree(ot2); hipFree(da); hipblasDestroy(handle); hipFree(d_a1); hipFree(d_c1); hipFree(d_a2); hipFree(d_c2); hipFree(door2); hipFree(door1); /* int nfields = mxGetNumberOfFields(prhs[2]);// printf("%d\n",nfields); //NStructElems = mxGetNumberOfElements(prhs[2]);// for (int ifield=0; ifield< nfields; ifield++){ printf("%s\n",mxGetFieldNameByNumber(prhs[2],ifield));// } printf("%d\n",mxGetN(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],1))));//mxArray *mxGetField(const mxArray *pm, mwIndex index, const char *fieldname) */ // mxArray 
*fout1,*fout2,*fout3,*fout4,*fout5,*fout6,*fout7,*fout8; const char *fieldnames[] = {"dw1_x","dw1_a1","db1","dw2_a1","dw2_a2","db2","dwy","dby"}; plhs[0]=mxCreateStructMatrix(1,1,8, fieldnames); const size_t dims1[]={4*n_a1,n_x}; fout1 = mxCreateNumericArray(2, dims1, mxSINGLE_CLASS, mxREAL); hipMemcpy((float*)mxGetPr(fout1),d_w1_x ,sizeof(float)*4*n_a1*n_x,hipMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 0, fout1); const size_t dims2[]={4*n_a1,n_a1}; fout2 = mxCreateNumericArray(2, dims2, mxSINGLE_CLASS, mxREAL); hipMemcpy((float*)mxGetPr(fout2),d_w1_a1 ,sizeof(float)*4*n_a1*n_a1,hipMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 1, fout2); const size_t dims3[]={4*n_a1,1}; fout3 = mxCreateNumericArray(2, dims3, mxSINGLE_CLASS, mxREAL); hipMemcpy((float*)mxGetPr(fout3),d_b1 ,sizeof(float)*4*n_a1,hipMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 2, fout3); const size_t dims4[]={4*n_a2,n_a1}; fout4 = mxCreateNumericArray(2, dims4, mxSINGLE_CLASS, mxREAL); hipMemcpy((float*)mxGetPr(fout4),d_w2_a1 ,sizeof(float)*4*n_a2*n_a1,hipMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 3, fout4); const size_t dims5[]={4*n_a2,n_a2}; fout5 = mxCreateNumericArray(2, dims5, mxSINGLE_CLASS, mxREAL); hipMemcpy((float*)mxGetPr(fout5),d_w2_a2 ,sizeof(float)*4*n_a2*n_a2,hipMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 4, fout5); const size_t dims6[]={4*n_a2,1}; fout6 = mxCreateNumericArray(2, dims6, mxSINGLE_CLASS, mxREAL); hipMemcpy((float*)mxGetPr(fout6),d_b2 ,sizeof(float)*4*n_a2,hipMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 5, fout6); const size_t dims7[]={n_y,n_a2}; fout7 = mxCreateNumericArray(2, dims7, mxSINGLE_CLASS, mxREAL); hipMemcpy((float*)mxGetPr(fout7),d_wy ,sizeof(float)*n_a2*n_y,hipMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 6, fout7); const size_t dims8[]={n_y,1}; fout8 = mxCreateNumericArray(2, dims8, mxSINGLE_CLASS, mxREAL); hipMemcpy((float*)mxGetPr(fout8),d_by ,sizeof(float)*n_y,hipMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 7, fout8); hipFree(d_w1_x); hipFree(d_w1_a1); hipFree(d_b1); hipFree(d_w2_a1); hipFree(d_w2_a2); hipFree(d_b2); hipFree(d_wy); hipFree(d_by); }
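/* A minimal error-checking sketch, not part of the original mex source: the forward/backward
 * passes above call hipMalloc, hipMemcpy and hipblasSgemm without inspecting return codes, so
 * failures only show up as wrong outputs. Assuming nothing beyond the standard hipError_t /
 * hipblasStatus_t types and the MEX error API, a checking wrapper could look like this
 * (macro names are illustrative): */
#define HIP_CHECK(call)                                                         \
  do {                                                                          \
    hipError_t err_ = (call);                                                   \
    if (err_ != hipSuccess) {                                                   \
      mexErrMsgIdAndTxt("LSTM:hip", "HIP error %s at %s:%d",                    \
                        hipGetErrorString(err_), __FILE__, __LINE__);           \
    }                                                                           \
  } while (0)

#define HIPBLAS_CHECK(call)                                                     \
  do {                                                                          \
    hipblasStatus_t st_ = (call);                                               \
    if (st_ != HIPBLAS_STATUS_SUCCESS) {                                        \
      mexErrMsgIdAndTxt("LSTM:hipblas", "hipBLAS status %d at %s:%d",           \
                        (int)st_, __FILE__, __LINE__);                          \
    }                                                                           \
  } while (0)

/* Example usage against the calls above:
 *   HIP_CHECK(hipMalloc((void**)&ft1, n_a1*m*sizeof(float)*T_x));
 *   HIPBLAS_CHECK(hipblasCreate(&handle));
 */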
3a5c2d0075e4db9c0815585cf9887c3cd4be5302.cu
#include "mex.h" #include "stdio.h" #include <string.h> #include "cublas_v2.h" #include <math.h> #pragma comment(lib,"cublas.lib") #define blocksize 32 #define THREAD_NUM 256 #define BLOCK_NUM 512 #define eps1 1.0e-30 #define eps 1.0e2 __global__ void Active(float *output_x,float *output_a1,float *b1,float *ft1,float *it1,float *cct1,float *ot1,int sum,int n_a) { const int tid=threadIdx.x; const int bid=blockIdx.x; int p,q,n_a4=4*n_a; for(int u=tid+bid*THREAD_NUM;u<sum;u+=BLOCK_NUM*THREAD_NUM) { p=u/n_a4; q=u%n_a4; if(q<n_a) ft1[q+p*n_a]=1/(1+exp(-(output_x[u]+output_a1[u]+b1[q]))); else if(q>=n_a&&q<(2*n_a)) it1[q-n_a+p*n_a]=1/(1+exp(-(output_x[u]+output_a1[u]+b1[q]))); else if(q>=(2*n_a)&&q<(3*n_a)) cct1[q-2*n_a+p*n_a]=2/(1+exp(-2*(output_x[u]+output_a1[u]+b1[q])))-1; else ot1[q-3*n_a+p*n_a]=1/(1+exp(-(output_x[u]+output_a1[u]+b1[q]))); } } __global__ void pointwise(float *ft1,float *it1,float *cct1,float *ot1,float *a_next1,float *c_next1,float *c_prev1,int sum) { const int tid=threadIdx.x; const int bid=blockIdx.x; for(int u=tid+bid*THREAD_NUM;u<sum;u+=BLOCK_NUM*THREAD_NUM) { c_next1[u]=ft1[u]*c_prev1[u]+it1[u]*cct1[u]; a_next1[u]=ot1[u]*(2/(1+exp(-2*c_next1[u]))-1); } } __global__ void Add(float *a,float *by,int n_y,int m) { const int tid=threadIdx.x; const int bid=blockIdx.x; for(int u=tid+bid*THREAD_NUM;u<n_y*m;u+=BLOCK_NUM*THREAD_NUM) a[u]=exp(a[u]+by[u%n_y]); } __global__ void sum(float *a,float *b,int n_y,int m) { const int tid=threadIdx.x; const int bid=blockIdx.x; int offset=1,mask=1; __shared__ float shared[THREAD_NUM]; shared[tid]=0; for(int u=tid+bid*n_y;u<n_y*(bid+1);u+= 1) { shared[tid]+=a[u]; } while(offset<THREAD_NUM) { if (tid&mask == 0) { shared[tid] += shared[tid + offset]; } offset += offset; mask=offset+mask; __syncthreads(); } if(tid==0) { b[bid]=shared[0]; } } __global__ void out(float *a,float *b,float *y_pred,float *output_diff,float *y_t,float *error,int sum,int n_y) { const int tid=threadIdx.x; const int bid=blockIdx.x; int r,p; for(int u=tid+bid*THREAD_NUM;u<sum;u+=BLOCK_NUM*THREAD_NUM) { r=u/n_y; p=y_t[r]-1; y_pred[u]=a[u]/b[r]; if((u%n_y)==p) { output_diff[u]=1-y_pred[u]; error[r]+=-log(y_pred[u]); } else output_diff[u]=-y_pred[u]; } } double add(float *a,int m,int T_x) { double error=0; for(int i=0;i<m;i++) error=error+a[i]; return error/(m*T_x); } __global__ void Da(float *da_next2,float *da,int sum) { const int tid=threadIdx.x; const int bid=blockIdx.x; for(int u=tid+bid*THREAD_NUM;u<sum;u+=BLOCK_NUM*THREAD_NUM) { da_next2[u]=da_next2[u]+da[u]; } } __global__ void Dc(float *dc_next2,float *dc_prev2,float *da_next2,float *c_next2,float *ft2,float *ot2,int sum) { const int tid=threadIdx.x; const int bid=blockIdx.x; float r; for(int u=tid+bid*THREAD_NUM;u<sum;u+=BLOCK_NUM*THREAD_NUM) { r=2/(1+exp(-2*c_next2[u]))-1; dc_next2[u]=dc_next2[u]+ot2[u]*(1-r*r)*da_next2[u]; dc_prev2[u]=dc_next2[u]*ft2[u]; } } __global__ void d_door(float *door,float *da_next2,float *dc_next2,float *c_next2,float *c_prev2,float *ot2,float *it2,float *cct2,float *ft2,int sum,int n_a) { const int tid=threadIdx.x; const int bid=blockIdx.x; int p,q,n_a4=4*n_a,r; for(int u=tid+bid*THREAD_NUM;u<sum;u+=BLOCK_NUM*THREAD_NUM) { p=u/n_a4; q=u%n_a4; if(q<n_a) { r=p*n_a+q; door[q+p*n_a4]=dc_next2[r]*c_prev2[r]*ft2[r]*(1-ft2[r]); } else if(q>=n_a&&q<(2*n_a)) { r=p*n_a+q-n_a; door[q+p*n_a4]=dc_next2[r]*cct2[r]*it2[r]*(1-it2[r]); } else if(q>=(2*n_a)&&q<(3*n_a)) { r=p*n_a+q-2*n_a; door[q+p*n_a4]=dc_next2[r]*it2[r]*(1-cct2[r]*cct2[r]); } else { r=p*n_a+q-3*n_a; 
door[q+p*n_a4]=da_next2[r]*(2/(1+exp(-2*c_next2[r]))-1)*ot2[r]*(1-ot2[r]); } } } __global__ void d_bias(float *door,float *d_b2,int a,int b) { const int bid=blockIdx.x; for(int u=bid;u<a;u+=BLOCK_NUM) { for(int i=0;i<b;i++) { d_b2[u]=d_b2[u]+door[a*i+u]; } } } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { //[gradients,Allerror]=LSTM(train_x{num,1},train_y{num,1},parameters); const size_t *dim_array = mxGetDimensions(prhs[0]); int n_x=*dim_array,m=*(dim_array+1),T_x=*(dim_array+2); int n_a1=256,n_a2=256,n_y=5; size_t size_x=n_x*m*T_x*sizeof(float); size_t size_y=m*T_x*sizeof(float); size_t layer_1=n_a1*m*sizeof(float); size_t layer_2=n_a2*m*sizeof(float); //输入数据 float *x_batch=(float*)mxGetPr(prhs[0]),*y_batch=(float*)mxGetPr(prhs[1]); float *host_w1_x=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],0))); float *host_w1_a1=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],1))); float *host_b1=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],2))); float *host_w2_a1=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],3))); float *host_w2_a2=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],4))); float *host_b2=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],5))); float *host_wy=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],6))); float *host_by=(float*)mxGetPr(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],7))); //前向隐藏单元 float *a1,*c1,*a2,*c2; cudaMalloc((void**)&a1,layer_1*(T_x+1)); cudaMalloc((void**)&c1,layer_1*(T_x+1)); cudaMalloc((void**)&a2,layer_2*(T_x+1)); cudaMalloc((void**)&c2,layer_2*(T_x+1)); cudaMemset(a1,0,layer_1*(T_x+1)); cudaMemset(c1,0,layer_1*(T_x+1)); cudaMemset(a2,0,layer_2*(T_x+1)); cudaMemset(c2,0,layer_2*(T_x+1)); //输入数据(x,y,w)拷贝到GPU float *x_t,*y_t; cudaMalloc((void**)&x_t,size_x); cudaMalloc((void**)&y_t,size_y); cudaMemcpy(x_t,x_batch,size_x,cudaMemcpyHostToDevice); cudaMemcpy(y_t,y_batch,size_y,cudaMemcpyHostToDevice); float *w1_x,*w1_a1,*b1,*w2_a1,*w2_a2,*b2,*wy,*by; cudaMalloc((void**)&w1_x,4*n_x*n_a1*sizeof(float)); cudaMalloc((void**)&w1_a1,4*n_a1*n_a1*sizeof(float)); cudaMalloc((void**)&b1,4*n_a1*sizeof(float)); cudaMalloc((void**)&w2_a1,4*n_a1*n_a2*sizeof(float)); cudaMalloc((void**)&w2_a2,4*n_a2*n_a2*sizeof(float)); cudaMalloc((void**)&b2,4*n_a2*sizeof(float)); cudaMalloc((void**)&wy,n_y*n_a2*sizeof(float)); cudaMalloc((void**)&by,n_y*sizeof(float)); cudaMemcpy(w1_x,host_w1_x,4*n_x*n_a1*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(w1_a1,host_w1_a1,4*n_a1*n_a1*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(b1,host_b1,4*n_a1*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(w2_a1,host_w2_a1,4*n_a1*n_a2*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(w2_a2,host_w2_a2,4*n_a2*n_a2*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(b2,host_b2,4*n_a2*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(wy,host_wy,n_y*n_a2*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(by,host_by,n_y*sizeof(float),cudaMemcpyHostToDevice); float *output_1,*output_2,*output_3,*output_4,*output_5,*output_6; cudaMalloc((void**)&output_1,4*n_a1*m*sizeof(float)); cudaMalloc((void**)&output_2,4*n_a1*m*sizeof(float)); cudaMalloc((void**)&output_3,4*n_a2*m*sizeof(float)); cudaMalloc((void**)&output_4,4*n_a2*m*sizeof(float)); cudaMalloc((void**)&output_5,n_y*m*sizeof(float)); cudaMalloc((void**)&output_6,m*sizeof(float)); cudaMemset(output_6,0,m*sizeof(float)); float *ft1,*it1,*cct1,*ot1,*ft2,*it2,*cct2,*ot2; 
cudaMalloc((void**)&ft1,n_a1*m*sizeof(float)*T_x); cudaMalloc((void**)&it1,n_a1*m*sizeof(float)*T_x); cudaMalloc((void**)&cct1,n_a1*m*sizeof(float)*T_x); cudaMalloc((void**)&ot1,n_a1*m*sizeof(float)*T_x); cudaMalloc((void**)&ft2,n_a2*m*sizeof(float)*T_x); cudaMalloc((void**)&it2,n_a2*m*sizeof(float)*T_x); cudaMalloc((void**)&cct2,n_a2*m*sizeof(float)*T_x); cudaMalloc((void**)&ot2,n_a2*m*sizeof(float)*T_x); float *y_pred,*output_diff,*da,*error,*error_cpu=(float*)malloc(m*sizeof(float)); cudaMalloc((void**)&y_pred,n_y*m*sizeof(float)*T_x); cudaMalloc((void**)&output_diff,n_y*m*sizeof(float)*T_x); cudaMalloc((void**)&da,n_a2*m*sizeof(float)*T_x); cudaMalloc((void**)&error,m*sizeof(float)); cudaMemset(error,0,m*sizeof(float)); float alpha=1,beta=0,beta1=1; cublasHandle_t handle; cublasCreate(&handle); dim3 dimBlock(blocksize, blocksize); for(int t=1;t<=T_x;t++){ cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,4*n_a1,m,n_x,&alpha,w1_x, 4*n_a1, x_t+(t-1)*n_x*m,n_x,&beta,output_1,4*n_a1); cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,4*n_a1,m,n_a1,&alpha,w1_a1, 4*n_a1, a1+(t-1)*n_a1*m,n_a1,&beta,output_2,4*n_a1); Active<< <BLOCK_NUM,THREAD_NUM>> >(output_1,output_2,b1,ft1+(t-1)*n_a1*m,it1+(t-1)*n_a1*m,cct1+(t-1)*n_a1*m,ot1+(t-1)*n_a1*m,4*n_a1*m,n_a1); pointwise<< <BLOCK_NUM,THREAD_NUM>> >(ft1+(t-1)*n_a1*m,it1+(t-1)*n_a1*m,cct1+(t-1)*n_a1*m,ot1+(t-1)*n_a1*m,a1+t*n_a1*m,c1+t*n_a1*m,c1+(t-1)*n_a1*m,n_a1*m); cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,4*n_a2,m,n_a1,&alpha,w2_a1, 4*n_a2, a1+t*n_a1*m,n_a1,&beta,output_3,4*n_a2); cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,4*n_a2,m,n_a2,&alpha,w2_a2, 4*n_a2, a2+(t-1)*n_a2*m,n_a2,&beta,output_4,4*n_a2); Active<< <BLOCK_NUM,THREAD_NUM>> >(output_3,output_4,b2,ft2+(t-1)*n_a2*m,it2+(t-1)*n_a2*m,cct2+(t-1)*n_a2*m,ot2+(t-1)*n_a2*m,4*n_a2*m,n_a2); pointwise<< <BLOCK_NUM,THREAD_NUM>> >(ft2+(t-1)*n_a2*m,it2+(t-1)*n_a2*m,cct2+(t-1)*n_a2*m,ot2+(t-1)*n_a2*m,a2+t*n_a2*m,c2+t*n_a2*m,c2+(t-1)*n_a2*m,n_a2*m); cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,n_y,m,n_a2,&alpha,wy, n_y, a2+t*n_a2*m,n_a2,&beta,output_5,n_y); Add<< <BLOCK_NUM,THREAD_NUM>> >(output_5,by,n_y,m); sum<< <m,THREAD_NUM>> >(output_5,output_6,n_y,m); out<< <BLOCK_NUM,THREAD_NUM>> >(output_5,output_6,y_pred+(t-1)*n_y*m,output_diff+(t-1)*n_y*m,y_t+(t-1)*m,error,n_y*m,n_y); cublasSgemm(handle,CUBLAS_OP_T,CUBLAS_OP_N,n_a2,m,n_y,&alpha,wy, n_y, output_diff+(t-1)*n_y*m,n_y,&beta,da+(t-1)*n_a2*m,n_a2); } float *d_w1_x,*d_w1_a1,*d_b1,*d_w2_a1,*d_w2_a2,*d_b2,*d_wy,*d_by; cudaMalloc((void**)&d_w1_x,4*n_x*n_a1*sizeof(float)); cudaMalloc((void**)&d_w1_a1,4*n_a1*n_a1*sizeof(float)); cudaMalloc((void**)&d_b1,4*n_a1*sizeof(float)); cudaMalloc((void**)&d_w2_a1,4*n_a1*n_a2*sizeof(float)); cudaMalloc((void**)&d_w2_a2,4*n_a2*n_a2*sizeof(float)); cudaMalloc((void**)&d_b2,4*n_a2*sizeof(float)); cudaMalloc((void**)&d_wy,n_y*n_a2*sizeof(float)); cudaMalloc((void**)&d_by,n_y*sizeof(float)); cudaMemset(d_w1_x,0,4*n_x*n_a1*sizeof(float)); cudaMemset(d_w1_a1,0,4*n_a1*n_a1*sizeof(float)); cudaMemset(d_b1,0,4*n_a1*sizeof(float)); cudaMemset(d_w2_a1,0,4*n_a1*n_a2*sizeof(float)); cudaMemset(d_w2_a2,0,4*n_a2*n_a2*sizeof(float)); cudaMemset(d_b2,0,4*n_a2*sizeof(float)); cudaMemset(d_wy,0,n_y*n_a2*sizeof(float)); cudaMemset(d_by,0,n_y*sizeof(float)); float *d_a1,*d_c1,*d_a2,*d_c2; cudaMalloc((void**)&d_a1,layer_1*(T_x+1)); cudaMalloc((void**)&d_c1,layer_1*(T_x+1)); cudaMalloc((void**)&d_a2,layer_2*(T_x+1)); cudaMalloc((void**)&d_c2,layer_2*(T_x+1)); cudaMemset(d_a1,0,layer_1*(T_x+1)); cudaMemset(d_c1,0,layer_1*(T_x+1)); 
cudaMemset(d_a2,0,layer_2*(T_x+1)); cudaMemset(d_c2,0,layer_2*(T_x+1)); float *door2,*door1; cudaMalloc((void**)&door2,4*n_a2*m*sizeof(float)); cudaMalloc((void**)&door1,4*n_a1*m*sizeof(float)); for(int t=T_x;t>=1;t--){ Da<< <BLOCK_NUM,THREAD_NUM>> >(d_a2+t*n_a2*m,da+(t-1)*n_a2*m,n_a2*m); Dc<< <BLOCK_NUM,THREAD_NUM>> >(d_c2+t*n_a2*m,d_c2+(t-1)*n_a2*m,d_a2+t*n_a2*m,c2+t*n_a2*m,ft2+(t-1)*n_a2*m,ot2+(t-1)*n_a2*m,n_a2*m); d_door<< <BLOCK_NUM,THREAD_NUM>> >(door2,d_a2+t*n_a2*m,d_c2+t*n_a2*m,c2+t*n_a2*m,c2+(t-1)*n_a2*m,ot2+(t-1)*n_a2*m,it2+(t-1)*n_a2*m,cct2+(t-1)*n_a2*m,ft2+(t-1)*n_a2*m,4*n_a2*m,n_a2); cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_T,4*n_a2,n_a2,m,&alpha,door2, 4*n_a2, a2+(t-1)*n_a2*m,n_a2,&beta1,d_w2_a2,4*n_a2); cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_T,4*n_a2,n_a1,m,&alpha,door2, 4*n_a2, a1+t*n_a1*m,n_a1,&beta1,d_w2_a1,4*n_a2); d_bias<< <BLOCK_NUM,1>> >(door2,d_b2,4*n_a2,m); cublasSgemm(handle,CUBLAS_OP_T,CUBLAS_OP_N,n_a2,m,4*n_a2,&alpha,w2_a2, 4*n_a2,door2 ,4*n_a2,&beta,d_a2+(t-1)*n_a2*m,n_a2); cublasSgemm(handle,CUBLAS_OP_T,CUBLAS_OP_N,n_a1,m,4*n_a2,&alpha,w2_a1, 4*n_a2,door2, 4*n_a2,&beta1,d_a1+t*n_a1*m,n_a1); Dc<< <BLOCK_NUM,THREAD_NUM>> >(d_c1+t*n_a1*m,d_c1+(t-1)*n_a1*m,d_a1+t*n_a1*m,c1+t*n_a1*m,ft1+(t-1)*n_a1*m,ot1+(t-1)*n_a1*m,n_a1*m); d_door<< <BLOCK_NUM,THREAD_NUM>> >(door1,d_a1+t*n_a1*m,d_c1+t*n_a1*m,c1+t*n_a1*m,c1+(t-1)*n_a1*m,ot1+(t-1)*n_a1*m,it1+(t-1)*n_a1*m,cct1+(t-1)*n_a1*m,ft1+(t-1)*n_a1*m,4*n_a1*m,n_a1); cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_T,4*n_a1,n_a1,m,&alpha,door1, 4*n_a1, a1+(t-1)*n_a1*m,n_a1,&beta1,d_w1_a1,4*n_a1); cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_T,4*n_a1,n_x,m,&alpha,door1, 4*n_a1, x_t+(t-1)*n_x*m,n_x,&beta1,d_w1_x,4*n_a1); d_bias<< <BLOCK_NUM,1>> >(door1,d_b1,4*n_a1,m); cublasSgemm(handle,CUBLAS_OP_T,CUBLAS_OP_N,n_a1,m,4*n_a1,&alpha,w1_a1, 4*n_a1,door1 ,4*n_a1,&beta,d_a1+(t-1)*n_a1*m,n_a1); cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_T,n_y,n_a2,m,&alpha,output_diff+(t-1)*n_y*m, n_y,a2+t*n_a2*m,n_a2,&beta1,d_wy,n_y); d_bias<< <BLOCK_NUM,1>> >(output_diff+(t-1)*n_y*m,d_by,n_y,m); } /* const size_t dim[]={1,m}; plhs[2] = mxCreateNumericArray(2,dim ,mxSINGLE_CLASS, mxREAL); cudaMemcpy((float*)mxGetPr(plhs[2]),output_6,m*sizeof(float), cudaMemcpyDeviceToHost); */ cudaMemcpy(error_cpu,error,m*sizeof(float),cudaMemcpyDeviceToHost); double *Allerror; plhs[1] = mxCreateDoubleMatrix(1,1,mxREAL); Allerror = mxGetPr(plhs[1]); *Allerror=add(error_cpu,m,T_x); cudaFree(a1); cudaFree(c1); cudaFree(a2); cudaFree(c2); cudaFree(x_t); cudaFree(y_t); cudaFree(y_pred); cudaFree(output_diff); cudaFree(error); free(error_cpu); cudaFree(w1_x); cudaFree(w1_a1); cudaFree(b1); cudaFree(w2_a1); cudaFree(w2_a2); cudaFree(b2); cudaFree(wy); cudaFree(by); cudaFree(output_1); cudaFree(output_2); cudaFree(output_3); cudaFree(output_4); cudaFree(output_5); cudaFree(output_6); cudaFree(ft1); cudaFree(it1); cudaFree(cct1); cudaFree(ot1); cudaFree(ft2); cudaFree(it2); cudaFree(cct2); cudaFree(ot2); cudaFree(da); cublasDestroy(handle); cudaFree(d_a1); cudaFree(d_c1); cudaFree(d_a2); cudaFree(d_c2); cudaFree(door2); cudaFree(door1); /* int nfields = mxGetNumberOfFields(prhs[2]);//获取结构体中变量的个数 printf("%d\n",nfields); //NStructElems = mxGetNumberOfElements(prhs[2]);//获取结构体数组中的结构体的个数 for (int ifield=0; ifield< nfields; ifield++){ printf("%s\n",mxGetFieldNameByNumber(prhs[2],ifield));//获取单个结构体字段的名字 } printf("%d\n",mxGetN(mxGetField(prhs[2],0,mxGetFieldNameByNumber(prhs[2],1))));//mxArray *mxGetField(const mxArray *pm, mwIndex index, const char *fieldname) */ //输出 mxArray 
*fout1,*fout2,*fout3,*fout4,*fout5,*fout6,*fout7,*fout8; const char *fieldnames[] = {"dw1_x","dw1_a1","db1","dw2_a1","dw2_a2","db2","dwy","dby"}; plhs[0]=mxCreateStructMatrix(1,1,8, fieldnames); const size_t dims1[]={4*n_a1,n_x}; fout1 = mxCreateNumericArray(2, dims1, mxSINGLE_CLASS, mxREAL); cudaMemcpy((float*)mxGetPr(fout1),d_w1_x ,sizeof(float)*4*n_a1*n_x,cudaMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 0, fout1); const size_t dims2[]={4*n_a1,n_a1}; fout2 = mxCreateNumericArray(2, dims2, mxSINGLE_CLASS, mxREAL); cudaMemcpy((float*)mxGetPr(fout2),d_w1_a1 ,sizeof(float)*4*n_a1*n_a1,cudaMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 1, fout2); const size_t dims3[]={4*n_a1,1}; fout3 = mxCreateNumericArray(2, dims3, mxSINGLE_CLASS, mxREAL); cudaMemcpy((float*)mxGetPr(fout3),d_b1 ,sizeof(float)*4*n_a1,cudaMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 2, fout3); const size_t dims4[]={4*n_a2,n_a1}; fout4 = mxCreateNumericArray(2, dims4, mxSINGLE_CLASS, mxREAL); cudaMemcpy((float*)mxGetPr(fout4),d_w2_a1 ,sizeof(float)*4*n_a2*n_a1,cudaMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 3, fout4); const size_t dims5[]={4*n_a2,n_a2}; fout5 = mxCreateNumericArray(2, dims5, mxSINGLE_CLASS, mxREAL); cudaMemcpy((float*)mxGetPr(fout5),d_w2_a2 ,sizeof(float)*4*n_a2*n_a2,cudaMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 4, fout5); const size_t dims6[]={4*n_a2,1}; fout6 = mxCreateNumericArray(2, dims6, mxSINGLE_CLASS, mxREAL); cudaMemcpy((float*)mxGetPr(fout6),d_b2 ,sizeof(float)*4*n_a2,cudaMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 5, fout6); const size_t dims7[]={n_y,n_a2}; fout7 = mxCreateNumericArray(2, dims7, mxSINGLE_CLASS, mxREAL); cudaMemcpy((float*)mxGetPr(fout7),d_wy ,sizeof(float)*n_a2*n_y,cudaMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 6, fout7); const size_t dims8[]={n_y,1}; fout8 = mxCreateNumericArray(2, dims8, mxSINGLE_CLASS, mxREAL); cudaMemcpy((float*)mxGetPr(fout8),d_by ,sizeof(float)*n_y,cudaMemcpyDeviceToHost); mxSetFieldByNumber(plhs[0], 0, 7, fout8); cudaFree(d_w1_x); cudaFree(d_w1_a1); cudaFree(d_b1); cudaFree(d_w2_a1); cudaFree(d_w2_a2); cudaFree(d_b2); cudaFree(d_wy); cudaFree(d_by); }
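/* The sum<<<m, THREAD_NUM>>> kernel above accumulates the softmax denominator for each column
 * with a shared-memory tree reduction. Two details of that kernel are worth flagging:
 * "tid&mask == 0" parses as "tid & (mask == 0)" because == binds tighter than &, the
 * accumulation loop advances u by 1 rather than by the thread count (so threads sum
 * overlapping tails of the column), and the first reduction step reads shared[tid + offset]
 * with no __syncthreads() after the initial writes. The sketch below shows one conventional
 * way to compute a per-column sum with the same launch shape (one block per column,
 * THREAD_NUM threads, THREAD_NUM being the power-of-two macro defined above); the kernel
 * name is illustrative and this is not the author's implementation: */
__global__ void column_sum_sketch(const float *a, float *b, int n_y, int m)
{
    __shared__ float shared[THREAD_NUM];
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;        // one block per column of the n_y x m matrix

    // Strided accumulation: each thread sums a disjoint subset of its column.
    float acc = 0.0f;
    for (int u = bid * n_y + tid; u < n_y * (bid + 1); u += THREAD_NUM)
        acc += a[u];
    shared[tid] = acc;
    __syncthreads();                   // all partial sums visible before the reduction

    // Power-of-two tree reduction; every iteration ends with a barrier.
    for (int offset = THREAD_NUM / 2; offset > 0; offset >>= 1) {
        if (tid < offset)
            shared[tid] += shared[tid + offset];
        __syncthreads();
    }

    if (tid == 0)
        b[bid] = shared[0];            // column sum for column bid
}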
b8f371c18213545a941451ddb565e6f3af4e4c52.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gtest/gtest.h> #include <memory> #include <vector> #include "dali/core/util.h" #include "dali/test/test_tensors.h" #include "dali/kernels/signal/window/extract_windows_gpu.cuh" #include "dali/kernels/scratch.h" #include "dali/kernels/signal/window/window_functions.h" namespace dali { namespace kernels { namespace signal { TEST(ExtractWindowsGPU, NonBatchedKernel) { float *in_gpu, *out_gpu; int winlen = 60; int outwinlen = 63; int windows = 80; int stride = windows; int step = 10; int length = windows * step - 100;; int center = 5; CUDA_CALL(hipMalloc(&in_gpu, sizeof(float)*length)); CUDA_CALL(hipMalloc(&out_gpu, sizeof(float)*windows*outwinlen)); std::vector<float> in(length), out(windows*outwinlen); for (int i = 0; i < length; i++) { in[i] = i + 1000; } for (bool reflect : {true, false}) { CUDA_CALL(hipMemcpy(in_gpu, in.data(), sizeof(float)*length, hipMemcpyHostToDevice)); CUDA_CALL(hipMemset(out_gpu, 0xff, sizeof(float)*windows*outwinlen)); int xblocks = div_ceil(length, 32); int yblocks = div_ceil(winlen, 32); hipLaunchKernelGGL(( window::ExtractVerticalWindowsKernel), dim3(dim3(xblocks, yblocks)), dim3(dim3(32, 32)), 0, 0, out_gpu, windows, stride, in_gpu, length, nullptr, winlen, outwinlen, center, step, reflect); CUDA_CALL( hipMemcpy(out.data(), out_gpu, sizeof(float)*winlen*windows, hipMemcpyDeviceToHost)); CUDA_CALL(hipDeviceSynchronize()); for (int w = 0; w < windows; w++) { for (int i = 0; i < winlen; i++) { int idx = w * step + i - center; if (reflect) idx = boundary::idx_reflect_101(idx, 0, length); float ref = idx >= 0 && idx < length ? in[idx] : 0; EXPECT_EQ(out[w + i*stride], ref) << "@ window = " << w << ", index = " << i; } for (int i = winlen; i < outwinlen; i++) { EXPECT_EQ(out[w + i*stride], 0) << "padding @ window = " << w << ", index = " << i; } } if (HasFailure()) { std::cout << "Debug: Extract window actual output:\n"; for (int i = 0; i < outwinlen; i++) { for (int j = 0; j < windows; j++) { std::cout << out[i*stride+j] << " "; } std::cout << "\n"; } std::cout << std::flush; } } hipFree(in_gpu); hipFree(out_gpu); } void TestBatchedExtract( ExtractWindowsImplGPU<float, float> *extract, const TensorListShape<1> &lengths, bool concatenate, Padding padding, span<const float> window, int out_win_len = -1) { bool vertical = extract->IsVertical(); ScratchpadAllocator sa; int N = lengths.num_samples(); ptrdiff_t total_length = 0; for (int i = 0; i < N; i++) { total_length += lengths[i][0]; } TestTensorList<float, 1> in_list; in_list.reshape(lengths); auto in_cpu = in_list.cpu(); for (int i = 0; i < N; i++) { for (int j = 0; j < lengths[i][0]; j++) in_cpu[i].data[j] = 1000*(i+1)+j; } ExtractWindowsArgs args; args.window_length = window.empty() ? 55 : window.size(); args.window_center = window.empty() ? 
21 : window.size()/2; args.window_step = 2; args.padding = padding; int out_win_len_actual = out_win_len < 0 ? args.window_length : out_win_len; KernelContext ctx; auto in_gpu = in_list.gpu(0); auto req = extract->Setup(ctx, make_span(lengths.shapes), args, concatenate, out_win_len); ASSERT_EQ(req.output_shapes.size(), 1u); ASSERT_EQ(req.output_shapes[0].num_samples(), concatenate ? 1 : N); sa.Reserve(req.scratch_sizes); auto scratchpad = sa.GetScratchpad(); ctx.scratchpad = &scratchpad; TestTensorList<float, 2> out; memory::KernelUniquePtr<float> gpu_win; if (!window.empty()) { gpu_win = memory::alloc_unique<float>(AllocType::GPU, window.size()); CUDA_CALL( hipMemcpy(gpu_win.get(), window.data(), sizeof(float)*window.size(), hipMemcpyHostToDevice)); } auto window_gpu = make_tensor_gpu<1>(gpu_win.get(), { window.size() }); out.reshape(req.output_shapes[0].to_static<2>()); auto out_gpu = out.gpu(0); CUDA_CALL(hipMemset(out_gpu.data[0], 0xff, sizeof(float)*out_gpu.shape.num_elements())); extract->Run(ctx, out_gpu, in_gpu, window_gpu); auto out_cpu = out.cpu(); ptrdiff_t ofs = 0; for (int sample = 0; sample < N; sample++) { ptrdiff_t length = lengths[sample][0]; int nwnd = args.num_windows(length); int out_sample = 0; if (!concatenate) { ofs = 0; out_sample = sample; } ptrdiff_t sample_stride = vertical ? out_cpu.shape[out_sample][1] : 1; ptrdiff_t window_stride = vertical ? 1 : out_cpu.shape[out_sample][1]; for (int w = 0; w < nwnd; w++, ofs += window_stride) { int i = 0; for (; i < args.window_length; i++) { ptrdiff_t idx = w * args.window_step + i - args.window_center; if (args.padding == Padding::Reflect) { idx = boundary::idx_reflect_101(idx, length); } float ref = idx >= 0 && idx < length ? in_cpu.data[sample][idx] : 0; if (!window.empty()) ref *= window[i]; ASSERT_EQ(out_cpu.data[out_sample][ofs + i*sample_stride], ref) << "@ sample = " << sample << ", window = " << w << ", index = " << i; } for (; i < out_win_len_actual; i++) { ASSERT_EQ(out_cpu.data[out_sample][ofs + i*sample_stride], 0) << "padding @ sample = " << sample << ", window = " << w << ", index = " << i; } } } } void TestBatchedExtract( const TensorListShape<1> &lengths, bool concatenate, Padding padding, bool vertical, span<const float> window, int out_win_len = -1) { std::unique_ptr<ExtractWindowsImplGPU<float, float>> extract; if (vertical) extract = std::make_unique<ExtractVerticalWindowsImplGPU<float, float>>(); else extract = std::make_unique<ExtractHorizontalWindowsImplGPU<float, float>>(); TestBatchedExtract(extract.get(), lengths, concatenate, padding, window, out_win_len); } void TestBatchedExtract( bool concatenate, Padding padding, bool vertical, span<const float> window, int out_win_len = -1) { std::unique_ptr<ExtractWindowsImplGPU<float, float>> extract; if (vertical) extract = std::make_unique<ExtractVerticalWindowsImplGPU<float, float>>(); else extract = std::make_unique<ExtractHorizontalWindowsImplGPU<float, float>>(); TensorListShape<1> lengths = {{ 5, 305, 157 }}; TestBatchedExtract(extract.get(), lengths, concatenate, padding, window, out_win_len); if (vertical) extract = std::make_unique<ExtractVerticalWindowsImplGPU<float, float>>(); else extract = std::make_unique<ExtractHorizontalWindowsImplGPU<float, float>>(); lengths = {{ 137, 203, 150, 12 }}; TestBatchedExtract(extract.get(), lengths, concatenate, padding, window, out_win_len); } TEST(ExtractVerticalWindowsGPU, BatchedConcat) { TestBatchedExtract(true, Padding::Reflect, true, {}); } TEST(ExtractVerticalWindowsGPU, BatchedSeparate) { 
TestBatchedExtract(false, Padding::Zero, true, {}); } TEST(ExtractVerticalWindowsGPU, BatchedConcatWindowFunc) { vector<float> window(60); HannWindow(make_span(window)); TestBatchedExtract(true, Padding::Zero, true, make_cspan(window)); } TEST(ExtractVerticalWindowsGPU, BatchedSeparateWindowFunc) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, true, make_cspan(window)); } TEST(ExtractVerticalWindowsGPU, BatchedSeparateWindowFuncPad) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(true, Padding::Reflect, true, make_cspan(window), 72); } TEST(ExtractHorizontalWindowsGPU, BatchedConcat) { TestBatchedExtract(true, Padding::Reflect, false, {}); } TEST(ExtractHorizontalWindowsGPU, BatchedSeparate) { TestBatchedExtract(false, Padding::Zero, false, {}); } TEST(ExtractHorizontalWindowsGPU, BatchedConcatWindowFunc) { vector<float> window(60); HannWindow(make_span(window)); TestBatchedExtract(true, Padding::Zero, false, make_cspan(window)); } TEST(ExtractHorizontalWindowsGPU, BatchedSeparateWindowFunc) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, false, make_cspan(window)); } TEST(ExtractHorizontalWindowsGPU, BatchedSeparateWindowFuncPad) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, false, make_cspan(window), 72); } TEST(ExtractHorizontalWindowsGPU, BatchedConcatWindowFuncPad) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, true, make_cspan(window), 72); } TEST(ExtractHorizontalWindowsGPU, SizeSweep) { int max_size = 2048; std::vector<TensorShape<1>> lengths; int step = 1; for (int s = 1; s <= max_size; s+=step) { if ((s&255) == 0) { if (step > 1) // add 2^n-1 lengths.push_back({s-1}); step += step; } lengths.push_back({s}); } TensorListShape<1> shape(lengths); vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(shape, false, Padding::Reflect, false, make_cspan(window)); } } // namespace signal } // namespace kernels } // namespace dali
b8f371c18213545a941451ddb565e6f3af4e4c52.cu
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gtest/gtest.h> #include <memory> #include <vector> #include "dali/core/util.h" #include "dali/test/test_tensors.h" #include "dali/kernels/signal/window/extract_windows_gpu.cuh" #include "dali/kernels/scratch.h" #include "dali/kernels/signal/window/window_functions.h" namespace dali { namespace kernels { namespace signal { TEST(ExtractWindowsGPU, NonBatchedKernel) { float *in_gpu, *out_gpu; int winlen = 60; int outwinlen = 63; int windows = 80; int stride = windows; int step = 10; int length = windows * step - 100;; int center = 5; CUDA_CALL(cudaMalloc(&in_gpu, sizeof(float)*length)); CUDA_CALL(cudaMalloc(&out_gpu, sizeof(float)*windows*outwinlen)); std::vector<float> in(length), out(windows*outwinlen); for (int i = 0; i < length; i++) { in[i] = i + 1000; } for (bool reflect : {true, false}) { CUDA_CALL(cudaMemcpy(in_gpu, in.data(), sizeof(float)*length, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemset(out_gpu, 0xff, sizeof(float)*windows*outwinlen)); int xblocks = div_ceil(length, 32); int yblocks = div_ceil(winlen, 32); window::ExtractVerticalWindowsKernel<<<dim3(xblocks, yblocks), dim3(32, 32)>>>( out_gpu, windows, stride, in_gpu, length, nullptr, winlen, outwinlen, center, step, reflect); CUDA_CALL( cudaMemcpy(out.data(), out_gpu, sizeof(float)*winlen*windows, cudaMemcpyDeviceToHost)); CUDA_CALL(cudaDeviceSynchronize()); for (int w = 0; w < windows; w++) { for (int i = 0; i < winlen; i++) { int idx = w * step + i - center; if (reflect) idx = boundary::idx_reflect_101(idx, 0, length); float ref = idx >= 0 && idx < length ? in[idx] : 0; EXPECT_EQ(out[w + i*stride], ref) << "@ window = " << w << ", index = " << i; } for (int i = winlen; i < outwinlen; i++) { EXPECT_EQ(out[w + i*stride], 0) << "padding @ window = " << w << ", index = " << i; } } if (HasFailure()) { std::cout << "Debug: Extract window actual output:\n"; for (int i = 0; i < outwinlen; i++) { for (int j = 0; j < windows; j++) { std::cout << out[i*stride+j] << " "; } std::cout << "\n"; } std::cout << std::flush; } } cudaFree(in_gpu); cudaFree(out_gpu); } void TestBatchedExtract( ExtractWindowsImplGPU<float, float> *extract, const TensorListShape<1> &lengths, bool concatenate, Padding padding, span<const float> window, int out_win_len = -1) { bool vertical = extract->IsVertical(); ScratchpadAllocator sa; int N = lengths.num_samples(); ptrdiff_t total_length = 0; for (int i = 0; i < N; i++) { total_length += lengths[i][0]; } TestTensorList<float, 1> in_list; in_list.reshape(lengths); auto in_cpu = in_list.cpu(); for (int i = 0; i < N; i++) { for (int j = 0; j < lengths[i][0]; j++) in_cpu[i].data[j] = 1000*(i+1)+j; } ExtractWindowsArgs args; args.window_length = window.empty() ? 55 : window.size(); args.window_center = window.empty() ? 21 : window.size()/2; args.window_step = 2; args.padding = padding; int out_win_len_actual = out_win_len < 0 ? 
args.window_length : out_win_len; KernelContext ctx; auto in_gpu = in_list.gpu(0); auto req = extract->Setup(ctx, make_span(lengths.shapes), args, concatenate, out_win_len); ASSERT_EQ(req.output_shapes.size(), 1u); ASSERT_EQ(req.output_shapes[0].num_samples(), concatenate ? 1 : N); sa.Reserve(req.scratch_sizes); auto scratchpad = sa.GetScratchpad(); ctx.scratchpad = &scratchpad; TestTensorList<float, 2> out; memory::KernelUniquePtr<float> gpu_win; if (!window.empty()) { gpu_win = memory::alloc_unique<float>(AllocType::GPU, window.size()); CUDA_CALL( cudaMemcpy(gpu_win.get(), window.data(), sizeof(float)*window.size(), cudaMemcpyHostToDevice)); } auto window_gpu = make_tensor_gpu<1>(gpu_win.get(), { window.size() }); out.reshape(req.output_shapes[0].to_static<2>()); auto out_gpu = out.gpu(0); CUDA_CALL(cudaMemset(out_gpu.data[0], 0xff, sizeof(float)*out_gpu.shape.num_elements())); extract->Run(ctx, out_gpu, in_gpu, window_gpu); auto out_cpu = out.cpu(); ptrdiff_t ofs = 0; for (int sample = 0; sample < N; sample++) { ptrdiff_t length = lengths[sample][0]; int nwnd = args.num_windows(length); int out_sample = 0; if (!concatenate) { ofs = 0; out_sample = sample; } ptrdiff_t sample_stride = vertical ? out_cpu.shape[out_sample][1] : 1; ptrdiff_t window_stride = vertical ? 1 : out_cpu.shape[out_sample][1]; for (int w = 0; w < nwnd; w++, ofs += window_stride) { int i = 0; for (; i < args.window_length; i++) { ptrdiff_t idx = w * args.window_step + i - args.window_center; if (args.padding == Padding::Reflect) { idx = boundary::idx_reflect_101(idx, length); } float ref = idx >= 0 && idx < length ? in_cpu.data[sample][idx] : 0; if (!window.empty()) ref *= window[i]; ASSERT_EQ(out_cpu.data[out_sample][ofs + i*sample_stride], ref) << "@ sample = " << sample << ", window = " << w << ", index = " << i; } for (; i < out_win_len_actual; i++) { ASSERT_EQ(out_cpu.data[out_sample][ofs + i*sample_stride], 0) << "padding @ sample = " << sample << ", window = " << w << ", index = " << i; } } } } void TestBatchedExtract( const TensorListShape<1> &lengths, bool concatenate, Padding padding, bool vertical, span<const float> window, int out_win_len = -1) { std::unique_ptr<ExtractWindowsImplGPU<float, float>> extract; if (vertical) extract = std::make_unique<ExtractVerticalWindowsImplGPU<float, float>>(); else extract = std::make_unique<ExtractHorizontalWindowsImplGPU<float, float>>(); TestBatchedExtract(extract.get(), lengths, concatenate, padding, window, out_win_len); } void TestBatchedExtract( bool concatenate, Padding padding, bool vertical, span<const float> window, int out_win_len = -1) { std::unique_ptr<ExtractWindowsImplGPU<float, float>> extract; if (vertical) extract = std::make_unique<ExtractVerticalWindowsImplGPU<float, float>>(); else extract = std::make_unique<ExtractHorizontalWindowsImplGPU<float, float>>(); TensorListShape<1> lengths = {{ 5, 305, 157 }}; TestBatchedExtract(extract.get(), lengths, concatenate, padding, window, out_win_len); if (vertical) extract = std::make_unique<ExtractVerticalWindowsImplGPU<float, float>>(); else extract = std::make_unique<ExtractHorizontalWindowsImplGPU<float, float>>(); lengths = {{ 137, 203, 150, 12 }}; TestBatchedExtract(extract.get(), lengths, concatenate, padding, window, out_win_len); } TEST(ExtractVerticalWindowsGPU, BatchedConcat) { TestBatchedExtract(true, Padding::Reflect, true, {}); } TEST(ExtractVerticalWindowsGPU, BatchedSeparate) { TestBatchedExtract(false, Padding::Zero, true, {}); } TEST(ExtractVerticalWindowsGPU, BatchedConcatWindowFunc) { 
vector<float> window(60); HannWindow(make_span(window)); TestBatchedExtract(true, Padding::Zero, true, make_cspan(window)); } TEST(ExtractVerticalWindowsGPU, BatchedSeparateWindowFunc) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, true, make_cspan(window)); } TEST(ExtractVerticalWindowsGPU, BatchedSeparateWindowFuncPad) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(true, Padding::Reflect, true, make_cspan(window), 72); } TEST(ExtractHorizontalWindowsGPU, BatchedConcat) { TestBatchedExtract(true, Padding::Reflect, false, {}); } TEST(ExtractHorizontalWindowsGPU, BatchedSeparate) { TestBatchedExtract(false, Padding::Zero, false, {}); } TEST(ExtractHorizontalWindowsGPU, BatchedConcatWindowFunc) { vector<float> window(60); HannWindow(make_span(window)); TestBatchedExtract(true, Padding::Zero, false, make_cspan(window)); } TEST(ExtractHorizontalWindowsGPU, BatchedSeparateWindowFunc) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, false, make_cspan(window)); } TEST(ExtractHorizontalWindowsGPU, BatchedSeparateWindowFuncPad) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, false, make_cspan(window), 72); } TEST(ExtractHorizontalWindowsGPU, BatchedConcatWindowFuncPad) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, true, make_cspan(window), 72); } TEST(ExtractHorizontalWindowsGPU, SizeSweep) { int max_size = 2048; std::vector<TensorShape<1>> lengths; int step = 1; for (int s = 1; s <= max_size; s+=step) { if ((s&255) == 0) { if (step > 1) // add 2^n-1 lengths.push_back({s-1}); step += step; } lengths.push_back({s}); } TensorListShape<1> shape(lengths); vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(shape, false, Padding::Reflect, false, make_cspan(window)); } } // namespace signal } // namespace kernels } // namespace dali
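// The reference checks in the tests above fold out-of-range window taps back into [0, length)
// with boundary::idx_reflect_101. A minimal host-side sketch of that indexing convention
// (reflection about the first/last sample without repeating the border), assuming a valid
// range of at least two elements; the real DALI helper may treat degenerate sizes differently
// and this function name is illustrative only:
inline int idx_reflect_101_sketch(int idx, int lo, int hi) {  // valid range is [lo, hi)
  if (hi - lo < 2)
    return lo;                                  // nothing to reflect into
  while (idx < lo || idx >= hi) {
    if (idx < lo)  idx = 2 * lo - idx;          // lo-1 -> lo+1, lo-2 -> lo+2, ...
    if (idx >= hi) idx = 2 * (hi - 1) - idx;    // hi   -> hi-2, hi+1 -> hi-3, ...
  }
  return idx;
}
// e.g. with lo = 0, hi = 5: idx -1 -> 1, idx 5 -> 3, idx 6 -> 2.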
23ebd30e71058cce7052968813a53dda10d9a22c.hip
// !!! This is a file automatically generated by hipify!!!
#include "SequenceVisitor.cuh"
#include "VeloUT.cuh"

template<>
void SequenceVisitor::set_arguments_size<veloUT_t>(
  veloUT_t::arguments_t arguments,
  const RuntimeOptions& runtime_options,
  const Constants& constants,
  const HostBuffers& host_buffers)
{
  arguments.set_size<dev_ut_tracks>(host_buffers.host_number_of_selected_events[0] * UT::Constants::max_num_tracks);
  arguments.set_size<dev_atomics_ut>(host_buffers.host_number_of_selected_events[0] * UT::num_atomics + 1);
}

template<>
void SequenceVisitor::visit<veloUT_t>(
  veloUT_t& state,
  const veloUT_t::arguments_t& arguments,
  const RuntimeOptions& runtime_options,
  const Constants& constants,
  HostBuffers& host_buffers,
  hipStream_t& cuda_stream,
  hipEvent_t& cuda_generic_event)
{
  state.set_opts(dim3(host_buffers.host_number_of_selected_events[0]), dim3(32), cuda_stream);
  state.set_arguments(
    arguments.offset<dev_ut_hits>(),
    arguments.offset<dev_ut_hit_offsets>(),
    arguments.offset<dev_atomics_velo>(),
    arguments.offset<dev_velo_track_hit_number>(),
    arguments.offset<dev_velo_track_hits>(),
    arguments.offset<dev_velo_states>(),
    arguments.offset<dev_ut_tracks>(),
    arguments.offset<dev_atomics_ut>(),
    constants.dev_ut_magnet_tool,
    constants.dev_ut_dxDy,
    constants.dev_unique_x_sector_layer_offsets,
    constants.dev_unique_x_sector_offsets,
    constants.dev_unique_sector_xs);

  state.invoke();
}
23ebd30e71058cce7052968813a53dda10d9a22c.cu
#include "SequenceVisitor.cuh" #include "VeloUT.cuh" template<> void SequenceVisitor::set_arguments_size<veloUT_t>( veloUT_t::arguments_t arguments, const RuntimeOptions& runtime_options, const Constants& constants, const HostBuffers& host_buffers) { arguments.set_size<dev_ut_tracks>(host_buffers.host_number_of_selected_events[0] * UT::Constants::max_num_tracks); arguments.set_size<dev_atomics_ut>(host_buffers.host_number_of_selected_events[0] * UT::num_atomics + 1); } template<> void SequenceVisitor::visit<veloUT_t>( veloUT_t& state, const veloUT_t::arguments_t& arguments, const RuntimeOptions& runtime_options, const Constants& constants, HostBuffers& host_buffers, cudaStream_t& cuda_stream, cudaEvent_t& cuda_generic_event) { state.set_opts(dim3(host_buffers.host_number_of_selected_events[0]), dim3(32), cuda_stream); state.set_arguments( arguments.offset<dev_ut_hits>(), arguments.offset<dev_ut_hit_offsets>(), arguments.offset<dev_atomics_velo>(), arguments.offset<dev_velo_track_hit_number>(), arguments.offset<dev_velo_track_hits>(), arguments.offset<dev_velo_states>(), arguments.offset<dev_ut_tracks>(), arguments.offset<dev_atomics_ut>(), constants.dev_ut_magnet_tool, constants.dev_ut_dxDy, constants.dev_unique_x_sector_layer_offsets, constants.dev_unique_x_sector_offsets, constants.dev_unique_sector_xs); state.invoke(); }
dae96e8df9f589da1e1b4f1e408e0e14c7e12f05.hip
// !!! This is a file automatically generated by hipify!!! //sys #include <cmath> #include <stdio.h> #include <cassert> #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdint.h> #include <string.h> //my #include "hardswish.h" #define NV_CUDA_CHECK(status) \ { \ if (status != 0) \ { \ std::cout << "Cuda failure: " << hipGetErrorString(status) << " in file " << __FILE__ \ << " at line " << __LINE__ << std::endl; \ abort(); \ } \ } namespace nvinfer1 { Hardswish::Hardswish() { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); _n_max_thread_pre_block = prop.maxThreadsPerBlock; } Hardswish::Hardswish(const void* data, size_t length) { const char *d = reinterpret_cast<const char*>(data), *a = d; r(d, _n_max_thread_pre_block); r(d, _n_output_size); assert(d == a + length); } __global__ void kernel_hardswish(const float *input_, float *output_, int n_data_size_) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= n_data_size_)return; if (input_[i] >= 3.0f) { output_[i] = input_[i]; } else if (input_[i] <= -3.0f) { output_[i] = 0.0f; } else { output_[i] = input_[i] * (input_[i] + 3.0f) / 6.0f; } } hipError_t cuda_hardswish_layer(const void* input_, void* output_, const int n_batch_size_, const int n_output_size_, const int threads_, hipStream_t stream_) { int n_data_size = n_batch_size_ * n_output_size_; kernel_hardswish << <(n_data_size + threads_ -1)/threads_, threads_ >> >( reinterpret_cast<const float*>(input_), reinterpret_cast<float*>(output_), n_data_size); return hipGetLastError(); } int Hardswish::enqueue(int batchSize, const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) noexcept { //printf("batch_size:%d,output_size:%d,threads:%d\n", batchSize, _n_output_size, _n_max_thread_pre_block); NV_CUDA_CHECK(cuda_hardswish_layer(inputs[0], outputs[0], batchSize, _n_output_size , _n_max_thread_pre_block,stream)); return 0; } int Hardswish::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, hipStream_t stream) noexcept { return enqueue(batchSize, inputs, (void* const*)outputs, workspace, stream); } size_t Hardswish::getSerializationSize() const noexcept { return sizeof(_n_max_thread_pre_block) +sizeof(_n_output_size); } void Hardswish::serialize(void *buffer) const noexcept { char *d = static_cast<char*>(buffer), *a = d; w(d, _n_max_thread_pre_block); w(d, _n_output_size); assert(d == a + getSerializationSize()); } bool Hardswish::supportsFormat(DataType type, PluginFormat format) const noexcept { return (type == DataType::kFLOAT && format == PluginFormat::kLINEAR); } void Hardswish::configureWithFormat(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, DataType type, PluginFormat format, int maxBatchSize) noexcept { } void Hardswish::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) noexcept { _n_output_size = in->dims.d[0] * in->dims.d[1] * in->dims.d[2]; // printf("output_size:%d,threads:%d\n", _n_output_size, _n_max_thread_pre_block); } IPluginV2* Hardswish::clone() const noexcept { Hardswish *p = new Hardswish(); p->setPluginNamespace(_s_plugin_namespace.c_str()); p->_n_max_thread_pre_block = _n_max_thread_pre_block; p->_n_output_size = _n_output_size; return p; } // PluginFieldCollection HardswishPluginCreator::_fc{}; std::vector<PluginField> HardswishPluginCreator::_vec_plugin_attributes; HardswishPluginCreator::HardswishPluginCreator() { _vec_plugin_attributes.clear(); _fc.nbFields = 
_vec_plugin_attributes.size(); _fc.fields = _vec_plugin_attributes.data(); } const char* HardswishPluginCreator::getPluginName() const noexcept { return "HARDSWISH_TRT"; } const char* HardswishPluginCreator::getPluginVersion() const noexcept { return "1.0"; } const PluginFieldCollection* HardswishPluginCreator::getFieldNames() noexcept { return &_fc; } IPluginV2* HardswishPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) noexcept { Hardswish* obj = new Hardswish(); obj->setPluginNamespace(_s_name_space.c_str()); return obj; } IPluginV2* HardswishPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) noexcept { Hardswish* obj = new Hardswish(serialData, serialLength); obj->setPluginNamespace(_s_name_space.c_str()); return obj; } void HardswishPluginCreator::setPluginNamespace(const char* libNamespace) noexcept { _s_name_space = libNamespace; } const char* HardswishPluginCreator::getPluginNamespace() const noexcept { return _s_name_space.c_str(); } }//end namespace nvinfer1
dae96e8df9f589da1e1b4f1e408e0e14c7e12f05.cu
//sys #include <cmath> #include <stdio.h> #include <cassert> #include <iostream> #include <cuda_runtime.h> #include <cuda.h> #include <stdint.h> #include <string.h> //my #include "hardswish.h" #define NV_CUDA_CHECK(status) \ { \ if (status != 0) \ { \ std::cout << "Cuda failure: " << cudaGetErrorString(status) << " in file " << __FILE__ \ << " at line " << __LINE__ << std::endl; \ abort(); \ } \ } namespace nvinfer1 { Hardswish::Hardswish() { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); _n_max_thread_pre_block = prop.maxThreadsPerBlock; } Hardswish::Hardswish(const void* data, size_t length) { const char *d = reinterpret_cast<const char*>(data), *a = d; r(d, _n_max_thread_pre_block); r(d, _n_output_size); assert(d == a + length); } __global__ void kernel_hardswish(const float *input_, float *output_, int n_data_size_) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= n_data_size_)return; if (input_[i] >= 3.0f) { output_[i] = input_[i]; } else if (input_[i] <= -3.0f) { output_[i] = 0.0f; } else { output_[i] = input_[i] * (input_[i] + 3.0f) / 6.0f; } } cudaError_t cuda_hardswish_layer(const void* input_, void* output_, const int n_batch_size_, const int n_output_size_, const int threads_, cudaStream_t stream_) { int n_data_size = n_batch_size_ * n_output_size_; kernel_hardswish << <(n_data_size + threads_ -1)/threads_, threads_ >> >( reinterpret_cast<const float*>(input_), reinterpret_cast<float*>(output_), n_data_size); return cudaGetLastError(); } int Hardswish::enqueue(int batchSize, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { //printf("batch_size:%d,output_size:%d,threads:%d\n", batchSize, _n_output_size, _n_max_thread_pre_block); NV_CUDA_CHECK(cuda_hardswish_layer(inputs[0], outputs[0], batchSize, _n_output_size , _n_max_thread_pre_block,stream)); return 0; } int Hardswish::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) noexcept { return enqueue(batchSize, inputs, (void* const*)outputs, workspace, stream); } size_t Hardswish::getSerializationSize() const noexcept { return sizeof(_n_max_thread_pre_block) +sizeof(_n_output_size); } void Hardswish::serialize(void *buffer) const noexcept { char *d = static_cast<char*>(buffer), *a = d; w(d, _n_max_thread_pre_block); w(d, _n_output_size); assert(d == a + getSerializationSize()); } bool Hardswish::supportsFormat(DataType type, PluginFormat format) const noexcept { return (type == DataType::kFLOAT && format == PluginFormat::kLINEAR); } void Hardswish::configureWithFormat(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, DataType type, PluginFormat format, int maxBatchSize) noexcept { } void Hardswish::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) noexcept { _n_output_size = in->dims.d[0] * in->dims.d[1] * in->dims.d[2]; // printf("output_size:%d,threads:%d\n", _n_output_size, _n_max_thread_pre_block); } IPluginV2* Hardswish::clone() const noexcept { Hardswish *p = new Hardswish(); p->setPluginNamespace(_s_plugin_namespace.c_str()); p->_n_max_thread_pre_block = _n_max_thread_pre_block; p->_n_output_size = _n_output_size; return p; } // PluginFieldCollection HardswishPluginCreator::_fc{}; std::vector<PluginField> HardswishPluginCreator::_vec_plugin_attributes; HardswishPluginCreator::HardswishPluginCreator() { _vec_plugin_attributes.clear(); _fc.nbFields = _vec_plugin_attributes.size(); _fc.fields = _vec_plugin_attributes.data(); } const 
char* HardswishPluginCreator::getPluginName() const noexcept { return "HARDSWISH_TRT"; } const char* HardswishPluginCreator::getPluginVersion() const noexcept { return "1.0"; } const PluginFieldCollection* HardswishPluginCreator::getFieldNames() noexcept { return &_fc; } IPluginV2* HardswishPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) noexcept { Hardswish* obj = new Hardswish(); obj->setPluginNamespace(_s_name_space.c_str()); return obj; } IPluginV2* HardswishPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) noexcept { Hardswish* obj = new Hardswish(serialData, serialLength); obj->setPluginNamespace(_s_name_space.c_str()); return obj; } void HardswishPluginCreator::setPluginNamespace(const char* libNamespace) noexcept { _s_name_space = libNamespace; } const char* HardswishPluginCreator::getPluginNamespace() const noexcept { return _s_name_space.c_str(); } }//end namespace nvinfer1
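// kernel_hardswish above evaluates hard-swish piecewise: 0 for x <= -3, x for x >= 3, and
// x*(x+3)/6 in between, which is equivalent to x * clamp(x+3, 0, 6) / 6. A scalar host
// reference for checking plugin output (illustrative only, not part of the plugin source):
static inline float hardswish_ref(float x)
{
    float t = x + 3.0f;
    if (t < 0.0f) t = 0.0f;     // clamp lower bound
    if (t > 6.0f) t = 6.0f;     // clamp upper bound
    return x * t / 6.0f;
}
// The launch in cuda_hardswish_layer uses (n_data_size + threads_ - 1) / threads_ blocks,
// i.e. ceil(n_data_size / threads_), so every element is covered by exactly one thread and
// the in-kernel bounds check (i >= n_data_size_) discards the remainder of the last block.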
4be67f01143de68d777fdd31cc10b202ff72c844.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string.h> #include <stdarg.h> #ifdef UNIX #include <stdint.h> #include <unistd.h> #endif #include "mex.h" // CUDA #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "rocblas.h" #include "cudaCommon.h" #include "cudaStatics.h" /* THIS FUNCTION: cudaStatics is used in the imposition of several kinds of boundary conditions upon arrays. Given a list of indices I, coefficients C and values V, it writes out phi[I] = (1-C)*phi[I] + C[i]*V[i], causing phi[I] to fade to V[i] at an exponential rate. It is also able to set mirror boundary conditions (FIXME: Not fully tested!) */ /* FIXME: rewrite this crap with template<>s */ /* X DIRECTION SYMMETRIC/ANTISYMMETRIC BC KERNELS FOR MIRROR BCS */ /* Assume a block size of [3 A B] */ __global__ void cukern_xminusSymmetrize(double *phi, int nx, int ny, int nz); __global__ void cukern_xminusAntisymmetrize(double *phi, int nx, int ny, int nz); __global__ void cukern_xplusSymmetrize(double *phi, int nx, int ny, int nz); __global__ void cukern_xplusAntisymmetrize(double *phi, int nx, int ny, int nz); /* Y DIRECTION SYMMETRIC/ANTISYMMETRIC BC KERNELS */ /* assume a block size of [N 1 M] */ __global__ void cukern_yminusSymmetrize(double *phi, int nx, int ny, int nz); __global__ void cukern_yminusAntisymmetrize(double *phi, int nx, int ny, int nz); __global__ void cukern_yplusSymmetrize(double *phi, int nx, int ny, int nz); __global__ void cukern_yplusAntisymmetrize(double *phi, int nx, int ny, int nz); /* Z DIRECTION SYMMETRIC/ANTISYMMETRIC BC KERNELS */ /* Assume launch with size [U V 1] */ __global__ void cukern_zminusSymmetrize(double *Phi, int nx, int ny, int nz); __global__ void cukern_zminusAntisymmetrize(double *Phi, int nx, int ny, int nz); __global__ void cukern_zplusSymmetrize(double *Phi, int nx, int ny, int nz); __global__ void cukern_zplusAntisymmetrize(double *Phi, int nx, int ny, int nz); /* X direction extrapolated boundary conditions */ /* Launch size [3 A B] */ __global__ void cukern_extrapolateLinearBdyXMinus(double *phi, int nx, int ny, int nz); __global__ void cukern_extrapolateLinearBdyXPlus(double *phi, int nx, int ny, int nz); __global__ void cukern_extrapolateConstBdyXMinus(double *phi, int nx, int ny, int nz); __global__ void cukern_extrapolateConstBdyXPlus(double *phi, int nx, int ny, int nz); __global__ void cukern_applySpecial_fade(double *phi, double *statics, int nSpecials, int blkOffset); int setBoundarySAS(MGArray *phi, int side, int direction, int sas); #ifdef STANDALONE_MEX_FUNCTION void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { if( (nlhs != 0) || (nrhs != 3)) { mexErrMsgTxt("cudaStatics operator is cudaStatics(ImogenArray, blockdim, direction)"); } CHECK_CUDA_ERROR("entering cudaStatics"); setBoundaryConditions(prhs[0], (int)*mxGetPr(prhs[2])); } #endif /* FIXME: This is terrible. * FIXME: MGArray needs to provision carrying its own boundary condition metadata around somehow. */ int setBoundaryConditions(const mxArray *matlabhandle, int direction) { CHECK_CUDA_ERROR("entering setBoundaryConditions"); MGArray phi, statics; int worked = MGA_accessMatlabArrays((const mxArray **)&matlabhandle, 0, 0, &phi); BAIL_ON_FAIL(worked) /* Grabs the whole boundaryData struct from the ImogenArray class */ mxArray *boundaryData = mxGetProperty(matlabhandle, 0, "boundaryData"); if(boundaryData == NULL) { printf("FATAL: field 'boundaryData' D.N.E. in class. Not a class? 
Not a FluidArray/MagnetArray/InitializedArray?\n"); return ERROR_INVALID_ARGS; } /* The statics describe "solid" structures which we force the grid to have */ mxArray *gpuStatics = mxGetField(boundaryData, 0, "staticsData"); if(gpuStatics == NULL) { printf("FATAL: field 'staticsData' D.N.E. in boundaryData struct. Statics not compiled?\n"); return ERROR_INVALID_ARGS; } worked = MGA_accessMatlabArrays((const mxArray **)(&gpuStatics), 0, 0, &statics); BAIL_ON_FAIL(worked) int *perm = &phi.currentPermutation[0]; int offsetidx = 2*(perm[0]-1) + 1*(perm[1] > perm[2]); /* The offset array describes the index offsets for the data in the gpuStatics array */ mxArray *offsets = mxGetField(boundaryData, 0, "compOffset"); if(offsets == NULL) { printf("FATAL: field 'compOffset' D.N.E. in boundaryData. Not an ImogenArray? Statics not compiled?\n"); return ERROR_INVALID_ARGS; } double *offsetcount = mxGetPr(offsets); long int staticsOffset = (long int)offsetcount[2*offsetidx]; int staticsNumel = (int)offsetcount[2*offsetidx+1]; /* Parameter describes what block size to launch with... */ int blockdim = 8; dim3 griddim; griddim.x = staticsNumel / blockdim + 1; if(griddim.x > 32768) { griddim.x = 32768; griddim.y = staticsNumel/(blockdim*griddim.x) + 1; } /* Every call results in applying specials */ if(statics.numel > 0) { PAR_WARN(phi); hipLaunchKernelGGL(( cukern_applySpecial_fade), dim3(griddim), dim3(blockdim), 0, 0, phi.devicePtr[0], statics.devicePtr[0] + staticsOffset, statics.numel, statics.dim[0]); worked = CHECK_CUDA_LAUNCH_ERROR(blockdim, griddim, &phi, 0, "cuda statics application"); if(worked != SUCCESSFUL) return worked; } /* Indicates which part of a 3-vector this array is (0 = scalar, 123=XYZ) */ mxArray *comp = mxGetProperty(matlabhandle, 0, "component"); int vectorComponent; if(comp != NULL) { vectorComponent = (int)(*mxGetPr(comp)); } else { printf("Failed to fetch 'component' field of class: Not an ImogenArray? Bailing.\n"); return ERROR_INVALID_ARGS; } /* BEGIN DETERMINATION OF ANALYTIC BOUNDARY CONDITIONS */ int numDirections = 1; mxArray *bcModes = mxGetField(boundaryData, 0, "bcModes"); if(bcModes == NULL) { printf("FATAL: bcModes structure not present. Not an ImogenArray? Not initialized?\n"); return ERROR_INVALID_ARGS; } int j; for(j = 0; j < numDirections; j++) { if(direction == 0) continue; /* Skips edge BCs if desired. */ int memoryDirection = perm[direction-1]; /* So this is kinda brain-damaged, but the boundary condition modes are stored in the form { 'type minus x', 'type minus y', 'type minus z'; 'type plus x', 'type plus y', 'type plus z'}; Yes, strings in a cell array. */ /* Okay, that's not kinda, it's straight-up stupid. 
*/ mxArray *bcstr; char *bs; int d; for(d = 0; d < 2; d++) { bcstr = mxGetCell(bcModes, 2*(memoryDirection-1) + d); bs = (char *)malloc(sizeof(char) * (mxGetNumberOfElements(bcstr)+1)); mxGetString(bcstr, bs, mxGetNumberOfElements(bcstr)+1); // Sets a mirror BC: scalar, vector_perp f(b+x) = f(b-x), vector normal f(b+x) = -f(b-x) if(strcmp(bs, "mirror") == 0) worked = setBoundarySAS(&phi, d, memoryDirection, vectorComponent == direction); // Extrapolates f(b+x) = f(b) if(strcmp(bs, "const") == 0) { worked = setBoundarySAS(&phi, d, memoryDirection, 2); } // Extrapolates f(b+x) = f(b) + x f'(b) // WARNING: This is unconditionally unstable unless normal flow rate is supersonic if(strcmp(bs, "linear") == 0) { worked = setBoundarySAS(&phi, d, memoryDirection, 3); } if(strcmp(bs, "wall") == 0) { printf("Wall BC is not implemented!\n"); return ERROR_INVALID_ARGS; } } if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked; } return SUCCESSFUL; } /* Sets the given array+AMD's boundary in the following manner: side -> 0 = negative edge 1 = positive edge direction -> 1 = X 2 = Y 3 = Z* sas -> 0 = symmetrize 1 => antisymmetrize -> 2 = extrap constant 3-> extrap linear *: As passed, assuming ImogenArray's indexPermute has been handled for us. */ void callBCKernel(dim3 griddim, dim3 blockdim, double *x, int nx, int ny, int nz, int ktable) { switch(ktable) { case 0:hipLaunchKernelGGL(( cukern_xminusSymmetrize), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 1:hipLaunchKernelGGL(( cukern_xminusAntisymmetrize), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 2:hipLaunchKernelGGL(( cukern_extrapolateConstBdyXMinus), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 3:hipLaunchKernelGGL(( cukern_extrapolateLinearBdyXMinus), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 4:hipLaunchKernelGGL(( cukern_xplusSymmetrize), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 5:hipLaunchKernelGGL(( cukern_xplusAntisymmetrize), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 6:hipLaunchKernelGGL(( cukern_extrapolateConstBdyXPlus), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 7:hipLaunchKernelGGL(( cukern_extrapolateLinearBdyXPlus), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 8:hipLaunchKernelGGL(( cukern_yminusSymmetrize), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 9:hipLaunchKernelGGL(( cukern_yminusAntisymmetrize), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 10: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; case 11: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; case 12:hipLaunchKernelGGL(( cukern_yplusSymmetrize), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 13:hipLaunchKernelGGL(( cukern_yplusAntisymmetrize), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 14: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; case 15: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; case 16:hipLaunchKernelGGL(( cukern_zminusSymmetrize), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 17:hipLaunchKernelGGL(( cukern_zminusAntisymmetrize), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 18: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; case 19: mexErrMsgTxt("Fatal: This boundary condition has not 
been implemented yet."); break; case 20:hipLaunchKernelGGL(( cukern_zplusSymmetrize), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 21:hipLaunchKernelGGL(( cukern_zplusAntisymmetrize), dim3(griddim), dim3(blockdim), 0, 0, x, nx, ny, nz); break; case 22: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; case 23: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; } } void *getBCKernel(int X) { void *PLACEHOLDER = NULL; void *kerntable[24] = {(void *)&cukern_xminusSymmetrize, \ (void *)&cukern_xminusAntisymmetrize, \ (void *)&cukern_extrapolateConstBdyXMinus, \ (void *)&cukern_extrapolateLinearBdyXMinus, \ (void *)&cukern_xplusSymmetrize, \ (void *)&cukern_xplusAntisymmetrize, (void *)&cukern_extrapolateConstBdyXPlus, \ (void *)&cukern_extrapolateLinearBdyXPlus, \ (void *)&cukern_yminusSymmetrize, \ (void *)&cukern_yminusAntisymmetrize, \ PLACEHOLDER, \ PLACEHOLDER, \ (void *)&cukern_yplusSymmetrize, \ (void *)&cukern_yplusAntisymmetrize, PLACEHOLDER, \ PLACEHOLDER, \ (void *)&cukern_zminusSymmetrize, \ (void *)&cukern_zminusAntisymmetrize, \ PLACEHOLDER, \ PLACEHOLDER, \ (void *)&cukern_zplusSymmetrize, \ (void *)&cukern_zplusAntisymmetrize, \ PLACEHOLDER, \ PLACEHOLDER }; return kerntable[X]; } int setBoundarySAS(MGArray *phi, int side, int direction, int sas) { dim3 blockdim, griddim; void (* bckernel)(double *, int, int, int); int i, sub[6]; int returnCode; switch(direction) { case 1: { blockdim.x = 3; blockdim.y = 16; blockdim.z = 8; } break; case 2: { blockdim.x = 16; blockdim.y = 1; blockdim.z = 16; } break; case 3: { blockdim.x = 16; blockdim.y = 16; blockdim.z = 1; } break; } // This is the easy case; We just have to apply a left-side condition to the leftmost partition and a // right-side condition to the rightmost partition and we're done if(direction == phi->partitionDir) { switch(direction) { case 1: { griddim.x = phi->dim[1] / blockdim.y; griddim.x += (griddim.x*blockdim.y < phi->dim[1]); griddim.y = phi->dim[2] / blockdim.z; griddim.y += (griddim.y*blockdim.z < phi->dim[2]); } break; case 2: { griddim.x = phi->dim[0] / blockdim.x; griddim.x += (griddim.x*blockdim.x < phi->dim[0]); griddim.y = phi->dim[2] / blockdim.z; griddim.y += (griddim.y*blockdim.z < phi->dim[2]); } break; case 3: { griddim.x = phi->dim[0] / blockdim.x; griddim.x += (griddim.x*blockdim.x < phi->dim[0]); griddim.y = phi->dim[1] / blockdim.y; griddim.y += (griddim.y*blockdim.y < phi->dim[1]); } break; } i = (side == 0) ? 0 : (phi->nGPUs - 1); hipSetDevice(phi->deviceID[i]); returnCode = CHECK_CUDA_ERROR("hipSetDevice()"); if(returnCode != SUCCESSFUL) return returnCode; //bckernel = (void (*)(double *, int, int, int))getBCKernel(sas + 4*side + 8*(direction-1)); //if((void *)bckernel == NULL) mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); //bckernel<<<griddim, blockdim>>>(phi->devicePtr[i], phi->dim[0], phi->dim[1], phi->dim[2]); calcPartitionExtent(phi, i, sub); callBCKernel(griddim, blockdim, phi->devicePtr[i], sub[3], sub[4], sub[5], sas + 4*side + 8*(direction-1)); returnCode = CHECK_CUDA_LAUNCH_ERROR(blockdim, griddim, phi, sas + 2*side + 4*direction, "In setBoundarySAS; integer -> cukern table index"); if(returnCode != SUCCESSFUL) return returnCode; } else { // If the BC isn't on a face that's aimed in the partitioned direction, // we have to loop and apply it to all partitions. 
for(i = 0; i < phi->nGPUs; i++) { calcPartitionExtent(phi, i, sub); // Set the launch size based on partition extent switch(direction) { case 1: { griddim.x = sub[4] / blockdim.y; griddim.x += (griddim.x*blockdim.y < sub[4]); griddim.y = sub[5] / blockdim.z; griddim.y += (griddim.y*blockdim.z < sub[5]); } break; case 2: { griddim.x = sub[3] / blockdim.x; griddim.x += (griddim.x*blockdim.x < sub[3]); griddim.y = sub[5] / blockdim.z; griddim.y += (griddim.y*blockdim.z < sub[5]); } break; case 3: { griddim.x = sub[3] / blockdim.x; griddim.x += (griddim.x*blockdim.x < sub[3]); griddim.y = sub[4] / blockdim.y; griddim.y += (griddim.y*blockdim.y < sub[4]); } break; } hipSetDevice(phi->deviceID[i]); returnCode = CHECK_CUDA_ERROR("hipSetDevice()"); if(returnCode != SUCCESSFUL) return returnCode; //bckernel = (void (*)(double *, int, int, int))getBCKernel(sas + 4*side + 8*(direction-1)); //if((void *)bckernel == NULL) callBCKernel(griddim, blockdim, phi->devicePtr[i], sub[3], sub[4], sub[5], sas + 4*side + 8*(direction-1)); //bckernel<<<griddim, blockdim>>>(phi->devicePtr[i], sub[3], sub[4], sub[5]); returnCode = CHECK_CUDA_LAUNCH_ERROR(blockdim, griddim, phi, sas + 4*side + 8*(direction-1), "In setBoundarySAS; integer -> cukern table index"); if(returnCode != SUCCESSFUL) return returnCode; } } return SUCCESSFUL; } __global__ void cukern_applySpecial_fade(double *phi, double *statics, int nSpecials, int blkOffset) { int myAddr = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x*blockIdx.y); if(myAddr >= nSpecials) return; statics += myAddr; long int xaddr = (long int)statics[0]; double f0 = statics[blkOffset]; double c = statics[blkOffset*2]; // if(c >= 0) { // Fade condition: Exponentially pulls cell towards c with rate constant f0; phi[xaddr] = f0*c + (1.0-c)*phi[xaddr]; // } else { // Wall condition: Any transfer between the marked cells is reversed // Assumptions: 2nd cell (xprimeaddr) must be in a stationary, no-flux region // long int xprimeaddr = (long int) statics[myAddr + blkOffset*3]; // phi[xaddr] += (phi[xprimeaddr]-f0); // phi[xprimaddr] = f0; // } } /* X DIRECTION SYMMETRIC/ANTISYMMETRIC BC KERNELS FOR MIRROR BCS */ /* Assume a block size of [3 A B] with grid dimensions [M N 1] s.t. 
AM >= ny, BN >= nz*/ /* Define the preamble common to all of these kernels: */ #define XSASKERN_PREAMBLE \ int stridey = nx; int stridez = nx*ny; \ int yidx = threadIdx.y + blockIdx.x*blockDim.y; \ int zidx = threadIdx.z + blockIdx.y*blockDim.z; \ if(yidx >= ny) return; if(zidx >= nz) return; /* These are combined with vector/scalar type information to implement mirror BCs */ __global__ void cukern_xminusSymmetrize(double *phi, int nx, int ny, int nz) { XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx; phi[2-threadIdx.x] = phi[4+threadIdx.x]; } __global__ void cukern_xminusAntisymmetrize(double *phi, int nx, int ny, int nz) { XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx; phi[2-threadIdx.x] = -phi[4+threadIdx.x]; } __global__ void cukern_xplusSymmetrize(double *phi, int nx, int ny, int nz) { XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx + nx - 7; phi[4+threadIdx.x] = phi[2-threadIdx.x]; } __global__ void cukern_xplusAntisymmetrize(double *phi, int nx, int ny, int nz) { XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx + nx - 7; phi[4+threadIdx.x] = -phi[2-threadIdx.x]; } /* These are called when a BC is set to 'const' or 'linear' */ __global__ void cukern_extrapolateConstBdyXMinus(double *phi, int nx, int ny, int nz) { XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx; phi[threadIdx.x] = phi[3]; } __global__ void cukern_extrapolateConstBdyXPlus(double *phi, int nx, int ny, int nz) { XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx + nx - 3; phi[threadIdx.x] = phi[-1]; } __global__ void cukern_extrapolateLinearBdyXMinus(double *phi, int nx, int ny, int nz) { __shared__ double f[3]; XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx; f[threadIdx.x] = phi[threadIdx.x+3]; __syncthreads(); phi[threadIdx.x] = phi[3] + (3-threadIdx.x)*(f[0]-f[1]); } __global__ void cukern_extrapolateLinearBdyXPlus(double *phi, int nx, int ny, int nz) { __shared__ double f[3]; XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx + nx-5; f[threadIdx.x] = phi[threadIdx.x]; __syncthreads(); phi[threadIdx.x+2] = f[1] + (threadIdx.x+1)*(f[1]-f[0]); } /* Y DIRECTION SYMMETRIC/ANTISYMMETRIC BC KERNELS */ /* assume a block size of [A 1 B] with grid dimensions [M N 1] s.t. AM >= nx, BN >=nz */ #define YSASKERN_PREAMBLE \ int xidx = threadIdx.x + blockIdx.x*blockDim.x; \ int zidx = threadIdx.z + blockIdx.y*blockDim.y; \ if(xidx >= nx) return; if(zidx >= nz) return; \ phi += nx*ny*zidx; __global__ void cukern_yminusSymmetrize(double *phi, int nx, int ny, int nz) { YSASKERN_PREAMBLE int q; for(q = 0; q < 3; q++) { phi[xidx+nx*q] = phi[xidx+nx*(6-q)]; } } __global__ void cukern_yminusAntisymmetrize(double *phi, int nx, int ny, int nz) { YSASKERN_PREAMBLE int q; for(q = 0; q < 3; q++) { phi[xidx+nx*q] = -phi[xidx+nx*(6-q)]; } } __global__ void cukern_yplusSymmetrize(double *phi, int nx, int ny, int nz) { YSASKERN_PREAMBLE int q; for(q = 0; q < 3; q++) { phi[xidx-nx*q] = phi[xidx+nx*(q-6)]; } } __global__ void cukern_yplusAntisymmetrize(double *phi, int nx, int ny, int nz) { YSASKERN_PREAMBLE int q; for(q = 0; q < 3; q++) { phi[xidx-nx*q] = -phi[xidx+nx*(q-6)]; } } /* Z DIRECTION SYMMETRIC/ANTISYMMETRIC BC KERNELS */ /* Assume launch with size [A B 1] and grid of size [M N 1] s.t. 
AM >= nx, BN >= ny*/ #define ZSASKERN_PREAMBLE \ int xidx = threadIdx.x + blockIdx.x * blockDim.x; \ int yidx = threadIdx.y + blockIdx.y * blockDim.y; \ if(xidx >= nx) return; if(yidx >= ny) return; \ phi += xidx + nx*yidx; __global__ void cukern_zminusSymmetrize(double *phi, int nx, int ny, int nz) { ZSASKERN_PREAMBLE double p[3]; int stride = nx*ny; p[0] = phi[4*stride]; p[1] = phi[5*stride]; p[2] = phi[6*stride]; phi[ 0 ] = p[2]; phi[ stride] = p[1]; phi[2*stride] = p[0]; } __global__ void cukern_zminusAntisymmetrize(double *phi, int nx, int ny, int nz) { ZSASKERN_PREAMBLE double p[3]; int stride = nx*ny; p[0] = phi[4*stride]; p[1] = phi[5*stride]; p[2] = phi[6*stride]; phi[ 0 ] = -p[2]; phi[ stride] = -p[1]; phi[2*stride] = -p[0]; } __global__ void cukern_zplusSymmetrize(double *phi, int nx, int ny, int nz) { ZSASKERN_PREAMBLE double p[3]; int stride = nx*ny; p[0] = phi[0]; p[1] = phi[stride]; p[2] = phi[2*stride]; phi[4*stride] = p[2]; phi[5*stride] = p[1]; phi[6*stride] = p[0]; } __global__ void cukern_zplusAntisymmetrize(double *phi, int nx, int ny, int nz) { ZSASKERN_PREAMBLE double p[3]; int stride = nx*ny; p[0] = phi[0]; p[1] = phi[stride]; p[2] = phi[2*stride]; phi[4*stride] = -p[2]; phi[5*stride] = -p[1]; phi[6*stride] = -p[0]; }
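/* A minimal host-side sketch of how setBoundarySAS() packs (sas, side, direction)
 * into the kernel-table index consumed by callBCKernel() above. bcKernelIndex() is a
 * hypothetical helper introduced only for illustration; it is not part of this file.
 * Encoding, per the comment block above setBoundarySAS():
 *   side      -> 0 = negative edge, 1 = positive edge
 *   direction -> 1 = X, 2 = Y, 3 = Z (after ImogenArray's index permutation)
 *   sas       -> 0 = symmetrize, 1 = antisymmetrize, 2 = extrap const, 3 = extrap linear */
static int bcKernelIndex(int sas, int side, int direction)
{
	return sas + 4*side + 8*(direction - 1); /* same expression passed to callBCKernel() */
}
/* Examples that line up with the switch table in callBCKernel():
 *   bcKernelIndex(0, 0, 1) ==  0 -> cukern_xminusSymmetrize
 *   bcKernelIndex(3, 1, 1) ==  7 -> cukern_extrapolateLinearBdyXPlus
 *   bcKernelIndex(0, 1, 2) == 12 -> cukern_yplusSymmetrize
 *   bcKernelIndex(1, 0, 3) == 17 -> cukern_zminusAntisymmetrize
 * Indices 10-11, 14-15, 18-19 and 22-23 (const/linear extrapolation in Y and Z) are the
 * unimplemented slots that abort through mexErrMsgTxt(). */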
4be67f01143de68d777fdd31cc10b202ff72c844.cu
#include <stdio.h> #include <string.h> #include <stdarg.h> #ifdef UNIX #include <stdint.h> #include <unistd.h> #endif #include "mex.h" // CUDA #include "cuda.h" #include "cuda_runtime.h" #include "cublas.h" #include "cudaCommon.h" #include "cudaStatics.h" /* THIS FUNCTION: cudaStatics is used in the imposition of several kinds of boundary conditions upon arrays. Given a list of indices I, coefficients C and values V, it writes out phi[I] = (1-C)*phi[I] + C[i]*V[i], causing phi[I] to fade to V[i] at an exponential rate. It is also able to set mirror boundary conditions (FIXME: Not fully tested!) */ /* FIXME: rewrite this crap with template<>s */ /* X DIRECTION SYMMETRIC/ANTISYMMETRIC BC KERNELS FOR MIRROR BCS */ /* Assume a block size of [3 A B] */ __global__ void cukern_xminusSymmetrize(double *phi, int nx, int ny, int nz); __global__ void cukern_xminusAntisymmetrize(double *phi, int nx, int ny, int nz); __global__ void cukern_xplusSymmetrize(double *phi, int nx, int ny, int nz); __global__ void cukern_xplusAntisymmetrize(double *phi, int nx, int ny, int nz); /* Y DIRECTION SYMMETRIC/ANTISYMMETRIC BC KERNELS */ /* assume a block size of [N 1 M] */ __global__ void cukern_yminusSymmetrize(double *phi, int nx, int ny, int nz); __global__ void cukern_yminusAntisymmetrize(double *phi, int nx, int ny, int nz); __global__ void cukern_yplusSymmetrize(double *phi, int nx, int ny, int nz); __global__ void cukern_yplusAntisymmetrize(double *phi, int nx, int ny, int nz); /* Z DIRECTION SYMMETRIC/ANTISYMMETRIC BC KERNELS */ /* Assume launch with size [U V 1] */ __global__ void cukern_zminusSymmetrize(double *Phi, int nx, int ny, int nz); __global__ void cukern_zminusAntisymmetrize(double *Phi, int nx, int ny, int nz); __global__ void cukern_zplusSymmetrize(double *Phi, int nx, int ny, int nz); __global__ void cukern_zplusAntisymmetrize(double *Phi, int nx, int ny, int nz); /* X direction extrapolated boundary conditions */ /* Launch size [3 A B] */ __global__ void cukern_extrapolateLinearBdyXMinus(double *phi, int nx, int ny, int nz); __global__ void cukern_extrapolateLinearBdyXPlus(double *phi, int nx, int ny, int nz); __global__ void cukern_extrapolateConstBdyXMinus(double *phi, int nx, int ny, int nz); __global__ void cukern_extrapolateConstBdyXPlus(double *phi, int nx, int ny, int nz); __global__ void cukern_applySpecial_fade(double *phi, double *statics, int nSpecials, int blkOffset); int setBoundarySAS(MGArray *phi, int side, int direction, int sas); #ifdef STANDALONE_MEX_FUNCTION void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { if( (nlhs != 0) || (nrhs != 3)) { mexErrMsgTxt("cudaStatics operator is cudaStatics(ImogenArray, blockdim, direction)"); } CHECK_CUDA_ERROR("entering cudaStatics"); setBoundaryConditions(prhs[0], (int)*mxGetPr(prhs[2])); } #endif /* FIXME: This is terrible. * FIXME: MGArray needs to provision carrying its own boundary condition metadata around somehow. */ int setBoundaryConditions(const mxArray *matlabhandle, int direction) { CHECK_CUDA_ERROR("entering setBoundaryConditions"); MGArray phi, statics; int worked = MGA_accessMatlabArrays((const mxArray **)&matlabhandle, 0, 0, &phi); BAIL_ON_FAIL(worked) /* Grabs the whole boundaryData struct from the ImogenArray class */ mxArray *boundaryData = mxGetProperty(matlabhandle, 0, "boundaryData"); if(boundaryData == NULL) { printf("FATAL: field 'boundaryData' D.N.E. in class. Not a class? 
Not a FluidArray/MagnetArray/InitializedArray?\n"); return ERROR_INVALID_ARGS; } /* The statics describe "solid" structures which we force the grid to have */ mxArray *gpuStatics = mxGetField(boundaryData, 0, "staticsData"); if(gpuStatics == NULL) { printf("FATAL: field 'staticsData' D.N.E. in boundaryData struct. Statics not compiled?\n"); return ERROR_INVALID_ARGS; } worked = MGA_accessMatlabArrays((const mxArray **)(&gpuStatics), 0, 0, &statics); BAIL_ON_FAIL(worked) int *perm = &phi.currentPermutation[0]; int offsetidx = 2*(perm[0]-1) + 1*(perm[1] > perm[2]); /* The offset array describes the index offsets for the data in the gpuStatics array */ mxArray *offsets = mxGetField(boundaryData, 0, "compOffset"); if(offsets == NULL) { printf("FATAL: field 'compOffset' D.N.E. in boundaryData. Not an ImogenArray? Statics not compiled?\n"); return ERROR_INVALID_ARGS; } double *offsetcount = mxGetPr(offsets); long int staticsOffset = (long int)offsetcount[2*offsetidx]; int staticsNumel = (int)offsetcount[2*offsetidx+1]; /* Parameter describes what block size to launch with... */ int blockdim = 8; dim3 griddim; griddim.x = staticsNumel / blockdim + 1; if(griddim.x > 32768) { griddim.x = 32768; griddim.y = staticsNumel/(blockdim*griddim.x) + 1; } /* Every call results in applying specials */ if(statics.numel > 0) { PAR_WARN(phi); cukern_applySpecial_fade<<<griddim, blockdim>>>(phi.devicePtr[0], statics.devicePtr[0] + staticsOffset, statics.numel, statics.dim[0]); worked = CHECK_CUDA_LAUNCH_ERROR(blockdim, griddim, &phi, 0, "cuda statics application"); if(worked != SUCCESSFUL) return worked; } /* Indicates which part of a 3-vector this array is (0 = scalar, 123=XYZ) */ mxArray *comp = mxGetProperty(matlabhandle, 0, "component"); int vectorComponent; if(comp != NULL) { vectorComponent = (int)(*mxGetPr(comp)); } else { printf("Failed to fetch 'component' field of class: Not an ImogenArray? Bailing.\n"); return ERROR_INVALID_ARGS; } /* BEGIN DETERMINATION OF ANALYTIC BOUNDARY CONDITIONS */ int numDirections = 1; mxArray *bcModes = mxGetField(boundaryData, 0, "bcModes"); if(bcModes == NULL) { printf("FATAL: bcModes structure not present. Not an ImogenArray? Not initialized?\n"); return ERROR_INVALID_ARGS; } int j; for(j = 0; j < numDirections; j++) { if(direction == 0) continue; /* Skips edge BCs if desired. */ int memoryDirection = perm[direction-1]; /* So this is kinda brain-damaged, but the boundary condition modes are stored in the form { 'type minus x', 'type minus y', 'type minus z'; 'type plus x', 'type plus y', 'type plus z'}; Yes, strings in a cell array. */ /* Okay, that's not kinda, it's straight-up stupid. 
*/ mxArray *bcstr; char *bs; int d; for(d = 0; d < 2; d++) { bcstr = mxGetCell(bcModes, 2*(memoryDirection-1) + d); bs = (char *)malloc(sizeof(char) * (mxGetNumberOfElements(bcstr)+1)); mxGetString(bcstr, bs, mxGetNumberOfElements(bcstr)+1); // Sets a mirror BC: scalar, vector_perp f(b+x) = f(b-x), vector normal f(b+x) = -f(b-x) if(strcmp(bs, "mirror") == 0) worked = setBoundarySAS(&phi, d, memoryDirection, vectorComponent == direction); // Extrapolates f(b+x) = f(b) if(strcmp(bs, "const") == 0) { worked = setBoundarySAS(&phi, d, memoryDirection, 2); } // Extrapolates f(b+x) = f(b) + x f'(b) // WARNING: This is unconditionally unstable unless normal flow rate is supersonic if(strcmp(bs, "linear") == 0) { worked = setBoundarySAS(&phi, d, memoryDirection, 3); } if(strcmp(bs, "wall") == 0) { printf("Wall BC is not implemented!\n"); return ERROR_INVALID_ARGS; } } if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked; } return SUCCESSFUL; } /* Sets the given array+AMD's boundary in the following manner: side -> 0 = negative edge 1 = positive edge direction -> 1 = X 2 = Y 3 = Z* sas -> 0 = symmetrize 1 => antisymmetrize -> 2 = extrap constant 3-> extrap linear *: As passed, assuming ImogenArray's indexPermute has been handled for us. */ void callBCKernel(dim3 griddim, dim3 blockdim, double *x, int nx, int ny, int nz, int ktable) { switch(ktable) { case 0: cukern_xminusSymmetrize<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 1: cukern_xminusAntisymmetrize<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 2: cukern_extrapolateConstBdyXMinus<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 3: cukern_extrapolateLinearBdyXMinus<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 4: cukern_xplusSymmetrize<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 5: cukern_xplusAntisymmetrize<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 6: cukern_extrapolateConstBdyXPlus<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 7: cukern_extrapolateLinearBdyXPlus<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 8: cukern_yminusSymmetrize<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 9: cukern_yminusAntisymmetrize<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 10: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; case 11: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; case 12: cukern_yplusSymmetrize<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 13: cukern_yplusAntisymmetrize<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 14: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; case 15: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; case 16: cukern_zminusSymmetrize<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 17: cukern_zminusAntisymmetrize<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 18: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; case 19: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; case 20: cukern_zplusSymmetrize<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 21: cukern_zplusAntisymmetrize<<<griddim, blockdim>>>(x, nx, ny, nz); break; case 22: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; case 23: mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); break; } } void *getBCKernel(int X) { void *PLACEHOLDER = NULL; void *kerntable[24] = {(void *)&cukern_xminusSymmetrize, \ (void 
*)&cukern_xminusAntisymmetrize, \ (void *)&cukern_extrapolateConstBdyXMinus, \ (void *)&cukern_extrapolateLinearBdyXMinus, \ (void *)&cukern_xplusSymmetrize, \ (void *)&cukern_xplusAntisymmetrize, (void *)&cukern_extrapolateConstBdyXPlus, \ (void *)&cukern_extrapolateLinearBdyXPlus, \ (void *)&cukern_yminusSymmetrize, \ (void *)&cukern_yminusAntisymmetrize, \ PLACEHOLDER, \ PLACEHOLDER, \ (void *)&cukern_yplusSymmetrize, \ (void *)&cukern_yplusAntisymmetrize, PLACEHOLDER, \ PLACEHOLDER, \ (void *)&cukern_zminusSymmetrize, \ (void *)&cukern_zminusAntisymmetrize, \ PLACEHOLDER, \ PLACEHOLDER, \ (void *)&cukern_zplusSymmetrize, \ (void *)&cukern_zplusAntisymmetrize, \ PLACEHOLDER, \ PLACEHOLDER }; return kerntable[X]; } int setBoundarySAS(MGArray *phi, int side, int direction, int sas) { dim3 blockdim, griddim; void (* bckernel)(double *, int, int, int); int i, sub[6]; int returnCode; switch(direction) { case 1: { blockdim.x = 3; blockdim.y = 16; blockdim.z = 8; } break; case 2: { blockdim.x = 16; blockdim.y = 1; blockdim.z = 16; } break; case 3: { blockdim.x = 16; blockdim.y = 16; blockdim.z = 1; } break; } // This is the easy case; We just have to apply a left-side condition to the leftmost partition and a // right-side condition to the rightmost partition and we're done if(direction == phi->partitionDir) { switch(direction) { case 1: { griddim.x = phi->dim[1] / blockdim.y; griddim.x += (griddim.x*blockdim.y < phi->dim[1]); griddim.y = phi->dim[2] / blockdim.z; griddim.y += (griddim.y*blockdim.z < phi->dim[2]); } break; case 2: { griddim.x = phi->dim[0] / blockdim.x; griddim.x += (griddim.x*blockdim.x < phi->dim[0]); griddim.y = phi->dim[2] / blockdim.z; griddim.y += (griddim.y*blockdim.z < phi->dim[2]); } break; case 3: { griddim.x = phi->dim[0] / blockdim.x; griddim.x += (griddim.x*blockdim.x < phi->dim[0]); griddim.y = phi->dim[1] / blockdim.y; griddim.y += (griddim.y*blockdim.y < phi->dim[1]); } break; } i = (side == 0) ? 0 : (phi->nGPUs - 1); cudaSetDevice(phi->deviceID[i]); returnCode = CHECK_CUDA_ERROR("cudaSetDevice()"); if(returnCode != SUCCESSFUL) return returnCode; //bckernel = (void (*)(double *, int, int, int))getBCKernel(sas + 4*side + 8*(direction-1)); //if((void *)bckernel == NULL) mexErrMsgTxt("Fatal: This boundary condition has not been implemented yet."); //bckernel<<<griddim, blockdim>>>(phi->devicePtr[i], phi->dim[0], phi->dim[1], phi->dim[2]); calcPartitionExtent(phi, i, sub); callBCKernel(griddim, blockdim, phi->devicePtr[i], sub[3], sub[4], sub[5], sas + 4*side + 8*(direction-1)); returnCode = CHECK_CUDA_LAUNCH_ERROR(blockdim, griddim, phi, sas + 2*side + 4*direction, "In setBoundarySAS; integer -> cukern table index"); if(returnCode != SUCCESSFUL) return returnCode; } else { // If the BC isn't on a face that's aimed in the partitioned direction, // we have to loop and apply it to all partitions. 
for(i = 0; i < phi->nGPUs; i++) { calcPartitionExtent(phi, i, sub); // Set the launch size based on partition extent switch(direction) { case 1: { griddim.x = sub[4] / blockdim.y; griddim.x += (griddim.x*blockdim.y < sub[4]); griddim.y = sub[5] / blockdim.z; griddim.y += (griddim.y*blockdim.z < sub[5]); } break; case 2: { griddim.x = sub[3] / blockdim.x; griddim.x += (griddim.x*blockdim.x < sub[3]); griddim.y = sub[5] / blockdim.z; griddim.y += (griddim.y*blockdim.z < sub[5]); } break; case 3: { griddim.x = sub[3] / blockdim.x; griddim.x += (griddim.x*blockdim.x < sub[3]); griddim.y = sub[4] / blockdim.y; griddim.y += (griddim.y*blockdim.y < sub[4]); } break; } cudaSetDevice(phi->deviceID[i]); returnCode = CHECK_CUDA_ERROR("cudaSetDevice()"); if(returnCode != SUCCESSFUL) return returnCode; //bckernel = (void (*)(double *, int, int, int))getBCKernel(sas + 4*side + 8*(direction-1)); //if((void *)bckernel == NULL) callBCKernel(griddim, blockdim, phi->devicePtr[i], sub[3], sub[4], sub[5], sas + 4*side + 8*(direction-1)); //bckernel<<<griddim, blockdim>>>(phi->devicePtr[i], sub[3], sub[4], sub[5]); returnCode = CHECK_CUDA_LAUNCH_ERROR(blockdim, griddim, phi, sas + 4*side + 8*(direction-1), "In setBoundarySAS; integer -> cukern table index"); if(returnCode != SUCCESSFUL) return returnCode; } } return SUCCESSFUL; } __global__ void cukern_applySpecial_fade(double *phi, double *statics, int nSpecials, int blkOffset) { int myAddr = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x*blockIdx.y); if(myAddr >= nSpecials) return; statics += myAddr; long int xaddr = (long int)statics[0]; double f0 = statics[blkOffset]; double c = statics[blkOffset*2]; // if(c >= 0) { // Fade condition: Exponentially pulls cell towards c with rate constant f0; phi[xaddr] = f0*c + (1.0-c)*phi[xaddr]; // } else { // Wall condition: Any transfer between the marked cells is reversed // Assumptions: 2nd cell (xprimeaddr) must be in a stationary, no-flux region // long int xprimeaddr = (long int) statics[myAddr + blkOffset*3]; // phi[xaddr] += (phi[xprimeaddr]-f0); // phi[xprimaddr] = f0; // } } /* X DIRECTION SYMMETRIC/ANTISYMMETRIC BC KERNELS FOR MIRROR BCS */ /* Assume a block size of [3 A B] with grid dimensions [M N 1] s.t. 
AM >= ny, BN >= nz*/ /* Define the preamble common to all of these kernels: */ #define XSASKERN_PREAMBLE \ int stridey = nx; int stridez = nx*ny; \ int yidx = threadIdx.y + blockIdx.x*blockDim.y; \ int zidx = threadIdx.z + blockIdx.y*blockDim.z; \ if(yidx >= ny) return; if(zidx >= nz) return; /* These are combined with vector/scalar type information to implement mirror BCs */ __global__ void cukern_xminusSymmetrize(double *phi, int nx, int ny, int nz) { XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx; phi[2-threadIdx.x] = phi[4+threadIdx.x]; } __global__ void cukern_xminusAntisymmetrize(double *phi, int nx, int ny, int nz) { XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx; phi[2-threadIdx.x] = -phi[4+threadIdx.x]; } __global__ void cukern_xplusSymmetrize(double *phi, int nx, int ny, int nz) { XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx + nx - 7; phi[4+threadIdx.x] = phi[2-threadIdx.x]; } __global__ void cukern_xplusAntisymmetrize(double *phi, int nx, int ny, int nz) { XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx + nx - 7; phi[4+threadIdx.x] = -phi[2-threadIdx.x]; } /* These are called when a BC is set to 'const' or 'linear' */ __global__ void cukern_extrapolateConstBdyXMinus(double *phi, int nx, int ny, int nz) { XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx; phi[threadIdx.x] = phi[3]; } __global__ void cukern_extrapolateConstBdyXPlus(double *phi, int nx, int ny, int nz) { XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx + nx - 3; phi[threadIdx.x] = phi[-1]; } __global__ void cukern_extrapolateLinearBdyXMinus(double *phi, int nx, int ny, int nz) { __shared__ double f[3]; XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx; f[threadIdx.x] = phi[threadIdx.x+3]; __syncthreads(); phi[threadIdx.x] = phi[3] + (3-threadIdx.x)*(f[0]-f[1]); } __global__ void cukern_extrapolateLinearBdyXPlus(double *phi, int nx, int ny, int nz) { __shared__ double f[3]; XSASKERN_PREAMBLE phi += stridey*yidx + stridez*zidx + nx-5; f[threadIdx.x] = phi[threadIdx.x]; __syncthreads(); phi[threadIdx.x+2] = f[1] + (threadIdx.x+1)*(f[1]-f[0]); } /* Y DIRECTION SYMMETRIC/ANTISYMMETRIC BC KERNELS */ /* assume a block size of [A 1 B] with grid dimensions [M N 1] s.t. AM >= nx, BN >=nz */ #define YSASKERN_PREAMBLE \ int xidx = threadIdx.x + blockIdx.x*blockDim.x; \ int zidx = threadIdx.z + blockIdx.y*blockDim.y; \ if(xidx >= nx) return; if(zidx >= nz) return; \ phi += nx*ny*zidx; __global__ void cukern_yminusSymmetrize(double *phi, int nx, int ny, int nz) { YSASKERN_PREAMBLE int q; for(q = 0; q < 3; q++) { phi[xidx+nx*q] = phi[xidx+nx*(6-q)]; } } __global__ void cukern_yminusAntisymmetrize(double *phi, int nx, int ny, int nz) { YSASKERN_PREAMBLE int q; for(q = 0; q < 3; q++) { phi[xidx+nx*q] = -phi[xidx+nx*(6-q)]; } } __global__ void cukern_yplusSymmetrize(double *phi, int nx, int ny, int nz) { YSASKERN_PREAMBLE int q; for(q = 0; q < 3; q++) { phi[xidx-nx*q] = phi[xidx+nx*(q-6)]; } } __global__ void cukern_yplusAntisymmetrize(double *phi, int nx, int ny, int nz) { YSASKERN_PREAMBLE int q; for(q = 0; q < 3; q++) { phi[xidx-nx*q] = -phi[xidx+nx*(q-6)]; } } /* Z DIRECTION SYMMETRIC/ANTISYMMETRIC BC KERNELS */ /* Assume launch with size [A B 1] and grid of size [M N 1] s.t. 
AM >= nx, BN >= ny*/ #define ZSASKERN_PREAMBLE \ int xidx = threadIdx.x + blockIdx.x * blockDim.x; \ int yidx = threadIdx.y + blockIdx.y * blockDim.y; \ if(xidx >= nx) return; if(yidx >= ny) return; \ phi += xidx + nx*yidx; __global__ void cukern_zminusSymmetrize(double *phi, int nx, int ny, int nz) { ZSASKERN_PREAMBLE double p[3]; int stride = nx*ny; p[0] = phi[4*stride]; p[1] = phi[5*stride]; p[2] = phi[6*stride]; phi[ 0 ] = p[2]; phi[ stride] = p[1]; phi[2*stride] = p[0]; } __global__ void cukern_zminusAntisymmetrize(double *phi, int nx, int ny, int nz) { ZSASKERN_PREAMBLE double p[3]; int stride = nx*ny; p[0] = phi[4*stride]; p[1] = phi[5*stride]; p[2] = phi[6*stride]; phi[ 0 ] = -p[2]; phi[ stride] = -p[1]; phi[2*stride] = -p[0]; } __global__ void cukern_zplusSymmetrize(double *phi, int nx, int ny, int nz) { ZSASKERN_PREAMBLE double p[3]; int stride = nx*ny; p[0] = phi[0]; p[1] = phi[stride]; p[2] = phi[2*stride]; phi[4*stride] = p[2]; phi[5*stride] = p[1]; phi[6*stride] = p[0]; } __global__ void cukern_zplusAntisymmetrize(double *phi, int nx, int ny, int nz) { ZSASKERN_PREAMBLE double p[3]; int stride = nx*ny; p[0] = phi[0]; p[1] = phi[stride]; p[2] = phi[2*stride]; phi[4*stride] = -p[2]; phi[5*stride] = -p[1]; phi[6*stride] = -p[0]; }
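/* The grid-size arithmetic used throughout setBoundarySAS(),
 *     griddim.x  = N / blockdim.x;
 *     griddim.x += (griddim.x*blockdim.x < N);
 * is a ceiling division: it launches just enough blocks to cover N cells and lets the
 * in-kernel "if(idx >= N) return;" guards drop the overhang. A minimal equivalent,
 * where ceilDiv() is a hypothetical helper not used elsewhere in this file: */
static unsigned int ceilDiv(unsigned int n, unsigned int block)
{
	return (n + block - 1) / block; /* same result as the divide-then-bump pattern above */
}
/* e.g. a 100 x 64 x 50 partition with blockdim = [16 1 16] (the Y-direction case) gives
 * griddim.x = ceilDiv(100, 16) = 7 and griddim.y = ceilDiv(50, 16) = 4. */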
761c5d7f2c7f2ea74add9bf2fa490a95d6a70b98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "SteerForEvasionCUDA.cuh" #include "../AgentGroupData.cuh" #include "../VectorUtils.cuh" #include "CUDAKernelGlobals.cuh" using namespace OpenSteer; extern "C" { __global__ void SteerForEvasionKernel( // Agent data. float4 const* pdPosition, float4 const* pdDirection, float4 * pdSteering, float3 const menacePosition, float3 const menaceDirection, float const menaceSpeed, float const maxPredictionTime, size_t const numAgents, float const fWeight, uint * pdAppliedKernels, uint const doNotApplyWith ); } __global__ void SteerForEvasionKernel( // Agent data. float4 const* pdPosition, float4 const* pdDirection, float4 * pdSteering, float3 const menacePosition, float3 const menaceDirection, float const menaceSpeed, float const maxPredictionTime, size_t const numAgents, float const fWeight, uint * pdAppliedKernels, uint const doNotApplyWith ) { int const index = (blockIdx.x * blockDim.x) + threadIdx.x; if( index >= numAgents ) return; if( pdAppliedKernels[ index ] & doNotApplyWith ) return; __shared__ float3 shPosition[ THREADSPERBLOCK ]; __shared__ float3 shDirection[ THREADSPERBLOCK ]; __shared__ float3 shSteering[ THREADSPERBLOCK ]; POSITION_SH( threadIdx.x ) = POSITION_F3( index ); DIRECTION_SH( threadIdx.x ) = DIRECTION_F3( index ); STEERING_SH( threadIdx.x ) = STEERING_F3( index ); // offset from this to menace, that distance, unit vector toward menace float3 const offset = float3_subtract( menacePosition, POSITION_SH( threadIdx.x ) ); float const distance = float3_length( offset ); float const roughTime = distance / menaceSpeed; float const predictionTime = ((roughTime > maxPredictionTime) ? maxPredictionTime : roughTime); float3 const targetPosition = float3_add( menacePosition, float3_scalar_multiply( float3_scalar_multiply( menaceDirection, menaceSpeed ), predictionTime ) ); // Get the desired velocity. float3 const desiredVelocity = float3_subtract( POSITION_SH( threadIdx.x ), targetPosition ); // Set the steering vector. float3 steering = float3_subtract( desiredVelocity, DIRECTION_SH( threadIdx.x ) ); // Normalize and apply the weight. steering = float3_scalar_multiply( float3_normalize( steering ), fWeight ); // Set the applied kernel bit. if( ! float3_equals( steering, float3_zero() ) ) pdAppliedKernels[ index ] |= KERNEL_EVADE_BIT; // Add into the steering vector. STEERING_SH( threadIdx.x ) = float3_add( steering, STEERING_SH( threadIdx.x ) ); // Copy the steering vectors back to global memory. STEERING( index ) = STEERING_SH_F4( threadIdx.x ); }
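/* Host-side sketch of the prediction performed by SteerForEvasionKernel above: the
 * menace's future position is extrapolated along its direction of travel for
 * min(distance / menaceSpeed, maxPredictionTime) seconds, and the desired velocity then
 * points from that predicted spot back toward the agent. The vec3 struct and
 * predictEvasionTarget() below are illustration-only stand-ins for the float3 helpers
 * in VectorUtils.cuh; they are not used by the kernel. */
#include <math.h>

struct vec3 { float x, y, z; };

static vec3 predictEvasionTarget( vec3 agentPos, vec3 menacePos, vec3 menaceDir,
                                  float menaceSpeed, float maxPredictionTime )
{
	// Distance from the agent to the menace.
	float dx = menacePos.x - agentPos.x;
	float dy = menacePos.y - agentPos.y;
	float dz = menacePos.z - agentPos.z;
	float distance = sqrtf( dx*dx + dy*dy + dz*dz );

	// roughTime = distance / menaceSpeed, clamped to maxPredictionTime (as in the kernel).
	float roughTime      = distance / menaceSpeed;
	float predictionTime = ( roughTime > maxPredictionTime ) ? maxPredictionTime : roughTime;

	// targetPosition = menacePosition + menaceDirection * menaceSpeed * predictionTime.
	vec3 target;
	target.x = menacePos.x + menaceDir.x * menaceSpeed * predictionTime;
	target.y = menacePos.y + menaceDir.y * menaceSpeed * predictionTime;
	target.z = menacePos.z + menaceDir.z * menaceSpeed * predictionTime;
	return target;
}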
761c5d7f2c7f2ea74add9bf2fa490a95d6a70b98.cu
#include "SteerForEvasionCUDA.cuh" #include "../AgentGroupData.cuh" #include "../VectorUtils.cuh" #include "CUDAKernelGlobals.cuh" using namespace OpenSteer; extern "C" { __global__ void SteerForEvasionKernel( // Agent data. float4 const* pdPosition, float4 const* pdDirection, float4 * pdSteering, float3 const menacePosition, float3 const menaceDirection, float const menaceSpeed, float const maxPredictionTime, size_t const numAgents, float const fWeight, uint * pdAppliedKernels, uint const doNotApplyWith ); } __global__ void SteerForEvasionKernel( // Agent data. float4 const* pdPosition, float4 const* pdDirection, float4 * pdSteering, float3 const menacePosition, float3 const menaceDirection, float const menaceSpeed, float const maxPredictionTime, size_t const numAgents, float const fWeight, uint * pdAppliedKernels, uint const doNotApplyWith ) { int const index = (blockIdx.x * blockDim.x) + threadIdx.x; if( index >= numAgents ) return; if( pdAppliedKernels[ index ] & doNotApplyWith ) return; __shared__ float3 shPosition[ THREADSPERBLOCK ]; __shared__ float3 shDirection[ THREADSPERBLOCK ]; __shared__ float3 shSteering[ THREADSPERBLOCK ]; POSITION_SH( threadIdx.x ) = POSITION_F3( index ); DIRECTION_SH( threadIdx.x ) = DIRECTION_F3( index ); STEERING_SH( threadIdx.x ) = STEERING_F3( index ); // offset from this to menace, that distance, unit vector toward menace float3 const offset = float3_subtract( menacePosition, POSITION_SH( threadIdx.x ) ); float const distance = float3_length( offset ); float const roughTime = distance / menaceSpeed; float const predictionTime = ((roughTime > maxPredictionTime) ? maxPredictionTime : roughTime); float3 const targetPosition = float3_add( menacePosition, float3_scalar_multiply( float3_scalar_multiply( menaceDirection, menaceSpeed ), predictionTime ) ); // Get the desired velocity. float3 const desiredVelocity = float3_subtract( POSITION_SH( threadIdx.x ), targetPosition ); // Set the steering vector. float3 steering = float3_subtract( desiredVelocity, DIRECTION_SH( threadIdx.x ) ); // Normalize and apply the weight. steering = float3_scalar_multiply( float3_normalize( steering ), fWeight ); // Set the applied kernel bit. if( ! float3_equals( steering, float3_zero() ) ) pdAppliedKernels[ index ] |= KERNEL_EVADE_BIT; // Add into the steering vector. STEERING_SH( threadIdx.x ) = float3_add( steering, STEERING_SH( threadIdx.x ) ); // Copy the steering vectors back to global memory. STEERING( index ) = STEERING_SH_F4( threadIdx.x ); }
1c27ab7b4fd5dcc59afbd2d7cccff4e697b9d970.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include <stdio.h> #include "hip/hip_runtime.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/sort.h> #include <thrust/functional.h> #include <thrust/binary_search.h> #include <thrust/set_operations.h> #include <thrust/extrema.h> #include "pcuda_string.h" PCudaString::PCudaString() { this->len = -1; this->str = NULL; } PCudaString::PCudaString(const std::string& other) { this->len = other.length(); this->ptr = thrust::device_malloc(this->len + 1); this->str = raw_pointer_cast(this->ptr); hipMemcpy(this->str, other.c_str(), this->len, hipMemcpyHostToDevice); } PCudaString::PCudaString(const PCudaString& other) { this->len = other.len; this->str = other.str; this->ptr = other.ptr; } int PCudaString::length() { return this->len; } int PCudaString::cstr_length() { return this->len + 1; } PCudaString::operator std::string() { std::string retval; thrust::copy(this->ptr, this->ptr + this->len, back_inserter(retval)); return retval; } void PCudaString::destroy() { if (this->str) { thrust::device_free(this->ptr); this->str = NULL; this->len = -1; } } bool operator< (PCudaString lhs, PCudaString rhs) { char *l = lhs.str; char *r = rhs.str; while((*l && *r) && *l == *r) { ++l; ++r; } return *l < *r; } bool pcuda_integer_sort(std::vector<long> *data) { thrust::device_vector<long> device = *data; thrust::sort(device.begin(), device.end()); thrust::copy(device.begin(), device.end(), data->begin()); return true; } bool pcuda_float_sort(std::vector<double> *data) { thrust::device_vector<double> device = *data; thrust::sort(device.begin(), device.end()); thrust::copy(device.begin(), device.end(), data->begin()); return true; } bool pcuda_string_sort(std::vector<std::string> *data) { printf("In pcuda_string_sort\n"); thrust::device_vector<PCudaString> device; printf("Reserving memory\n"); device.reserve(data->size()); printf("Copying data to device\n"); for (std::vector<std::string>::iterator iter = data->begin(); iter != data->end(); ++iter) { std::string s = *iter; device.push_back(s); } printf("On-device sort\n"); thrust::sort(device.begin(), device.end()); printf("Copying data from device\n"); thrust::host_vector<PCudaString> results = device; data->clear(); for (thrust::host_vector<PCudaString>::iterator iter = results.begin(); iter != results.end(); ++iter) { PCudaString cs = *iter; std::string s = cs; cs.destroy(); data->push_back(s); } printf("Done!\n"); return true; } bool pcuda_integer_binary_search(std::vector<long> *data, long target) { thrust::device_vector<long> device = *data; return thrust::binary_search(device.begin(), device.end(), target, thrust::less<long>()); } bool pcuda_float_binary_search(std::vector<double> *data, double target) { thrust::device_vector<double> device = *data; return thrust::binary_search(device.begin(), device.end(), target, thrust::less<double>()); } void pcuda_integer_intersection(std::vector<long> *first, std::vector<long> *second, std::vector<long> *intersection) { thrust::set_intersection(first->begin(), first->end(), second->begin(), second->end(), std::back_inserter(*intersection)); } void pcuda_float_intersection(std::vector<double> *first, std::vector<double> *second, std::vector<double> *intersection) { thrust::set_intersection(first->begin(), first->end(), second->begin(), second->end(), std::back_inserter(*intersection)); } void pcuda_integer_minmax(std::vector<long> *data, long *minmax) { 
thrust::pair<std::vector<long>::iterator, std::vector<long>::iterator> result =
        thrust::minmax_element(data->begin(), data->end());
    minmax[0] = *result.first;
    minmax[1] = *result.second;
}

void pcuda_float_minmax(std::vector<double> *data, double *minmax) {
    thrust::pair<std::vector<double>::iterator, std::vector<double>::iterator> result =
        thrust::minmax_element(data->begin(), data->end());
    minmax[0] = *result.first;
    minmax[1] = *result.second;
}
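/* A small host-side usage sketch for the entry points defined above. It only calls
 * functions from this translation unit (declared in pcuda_string.h); the name
 * pcuda_usage_example is a placeholder added for illustration and is not part of the
 * library's API. */
static void pcuda_usage_example()
{
	std::vector<long> values;
	values.push_back(42);
	values.push_back(7);
	values.push_back(19);

	pcuda_integer_sort(&values);                            // device-side thrust::sort
	bool found = pcuda_integer_binary_search(&values, 19);  // device-side binary search

	long minmax[2];
	pcuda_integer_minmax(&values, minmax);                  // minmax[0] = min, minmax[1] = max

	printf("found=%d min=%ld max=%ld\n", (int)found, minmax[0], minmax[1]);
}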
1c27ab7b4fd5dcc59afbd2d7cccff4e697b9d970.cu
#include <vector> #include <stdio.h> #include "cuda.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/sort.h> #include <thrust/functional.h> #include <thrust/binary_search.h> #include <thrust/set_operations.h> #include <thrust/extrema.h> #include "pcuda_string.h" PCudaString::PCudaString() { this->len = -1; this->str = NULL; } PCudaString::PCudaString(const std::string& other) { this->len = other.length(); this->ptr = thrust::device_malloc(this->len + 1); this->str = raw_pointer_cast(this->ptr); cudaMemcpy(this->str, other.c_str(), this->len, cudaMemcpyHostToDevice); } PCudaString::PCudaString(const PCudaString& other) { this->len = other.len; this->str = other.str; this->ptr = other.ptr; } int PCudaString::length() { return this->len; } int PCudaString::cstr_length() { return this->len + 1; } PCudaString::operator std::string() { std::string retval; thrust::copy(this->ptr, this->ptr + this->len, back_inserter(retval)); return retval; } void PCudaString::destroy() { if (this->str) { thrust::device_free(this->ptr); this->str = NULL; this->len = -1; } } bool operator< (PCudaString lhs, PCudaString rhs) { char *l = lhs.str; char *r = rhs.str; while((*l && *r) && *l == *r) { ++l; ++r; } return *l < *r; } bool pcuda_integer_sort(std::vector<long> *data) { thrust::device_vector<long> device = *data; thrust::sort(device.begin(), device.end()); thrust::copy(device.begin(), device.end(), data->begin()); return true; } bool pcuda_float_sort(std::vector<double> *data) { thrust::device_vector<double> device = *data; thrust::sort(device.begin(), device.end()); thrust::copy(device.begin(), device.end(), data->begin()); return true; } bool pcuda_string_sort(std::vector<std::string> *data) { printf("In pcuda_string_sort\n"); thrust::device_vector<PCudaString> device; printf("Reserving memory\n"); device.reserve(data->size()); printf("Copying data to device\n"); for (std::vector<std::string>::iterator iter = data->begin(); iter != data->end(); ++iter) { std::string s = *iter; device.push_back(s); } printf("On-device sort\n"); thrust::sort(device.begin(), device.end()); printf("Copying data from device\n"); thrust::host_vector<PCudaString> results = device; data->clear(); for (thrust::host_vector<PCudaString>::iterator iter = results.begin(); iter != results.end(); ++iter) { PCudaString cs = *iter; std::string s = cs; cs.destroy(); data->push_back(s); } printf("Done!\n"); return true; } bool pcuda_integer_binary_search(std::vector<long> *data, long target) { thrust::device_vector<long> device = *data; return thrust::binary_search(device.begin(), device.end(), target, thrust::less<long>()); } bool pcuda_float_binary_search(std::vector<double> *data, double target) { thrust::device_vector<double> device = *data; return thrust::binary_search(device.begin(), device.end(), target, thrust::less<double>()); } void pcuda_integer_intersection(std::vector<long> *first, std::vector<long> *second, std::vector<long> *intersection) { thrust::set_intersection(first->begin(), first->end(), second->begin(), second->end(), std::back_inserter(*intersection)); } void pcuda_float_intersection(std::vector<double> *first, std::vector<double> *second, std::vector<double> *intersection) { thrust::set_intersection(first->begin(), first->end(), second->begin(), second->end(), std::back_inserter(*intersection)); } void pcuda_integer_minmax(std::vector<long> *data, long *minmax) { thrust::pair<std::vector<long>::iterator, std::vector<long>::iterator> result = 
thrust::minmax_element(data->begin(), data->end());
    minmax[0] = *result.first;
    minmax[1] = *result.second;
}

void pcuda_float_minmax(std::vector<double> *data, double *minmax) {
    thrust::pair<std::vector<double>::iterator, std::vector<double>::iterator> result =
        thrust::minmax_element(data->begin(), data->end());
    minmax[0] = *result.first;
    minmax[1] = *result.second;
}
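/* Note on string termination: operator< above walks both device strings until a
 * mismatching or NUL character, so it relies on the device copy being NUL-terminated.
 * The PCudaString(const std::string&) constructor allocates len + 1 bytes but copies
 * only len, which leaves the terminator byte uninitialized. A sketch of a constructor
 * body that also copies the terminator (std::string::c_str() is guaranteed to be
 * NUL-terminated) -- shown in comment form only, since redefining the constructor here
 * would not compile:
 *
 *     PCudaString::PCudaString(const std::string& other) {
 *         this->len = other.length();
 *         this->ptr = thrust::device_malloc(this->len + 1);
 *         this->str = raw_pointer_cast(this->ptr);
 *         // copy len + 1 bytes so the trailing '\0' lands on the device as well
 *         cudaMemcpy(this->str, other.c_str(), this->len + 1, cudaMemcpyHostToDevice);
 *     }
 */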
5fc060817b2404599b6b5d9a6db9cf63dffed841.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "helper_cuda.h" #include "helper_functions.h" #include "IC_GN.cuh" #include <stdio.h> __global__ void computeICGN(const float* input_dPXY, const float* input_mdR, const float* input_mdRx, const float* input_mdRy, float m_dNormDeltaP, const float* input_mdT, const float* input_mBicubic, const int* input_iU, const int* input_iV, int m_iNumberY, int m_iNumberX, int m_iSubsetH, int m_iSubsetW, int m_iWidth, int m_iHeight, int m_iSubsetY, int m_iSubsetX, int m_iMaxiteration, float* output_dP, int* m_iIterationNum, float* m_dSubsetR, float* m_dSubsetT, float* m_dJacobian, float* m_dRDescent, float* m_dHessianXY,float* m_dSubsetAveR, float* m_dSubsetAveT, float* m_dError, float* m_dDP) /*Input: all the const variables Output: deformation P matrix Strategy: Each thread computes one of the 21*21 POIs */ { unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; unsigned int row = blockIdx.y * blockDim.y + threadIdx.y; unsigned int offset = row*m_iNumberX + col; float m_dU, m_dV, m_dUx, m_dUy, m_dVx, m_dVy; float m_dDU, m_dDV, m_dDUx, m_dDUy, m_dDVx, m_dDVy; float m_dSubAveR,m_dSubNorR, m_dSubAveT,m_dSubNorT; float m_dWarp[3][3], m_dHessian[6][6], m_dInvHessian[6][6]; float m_dNumerator[6]; float m_dTemp; int m_iTemp; int m_iIteration; float m_dWarpX, m_dWarpY; int m_iTempX,m_iTempY; float m_dTempX, m_dTempY; if(row<m_iNumberY && col<m_iNumberX){ m_dU = input_iU[offset]; m_dV = input_iV[offset]; m_dUx = 0; m_dUy = 0; m_dVx = 0; m_dVy = 0; output_dP[offset*6+0] = m_dU; output_dP[offset*6+1] = m_dUx; output_dP[offset*6+2] = m_dUy; output_dP[offset*6+3] = m_dV; output_dP[offset*6+4] = m_dVx; output_dP[offset*6+5] = m_dVy; // Initialize the warp matrix m_dWarp[0][0] = 1 + m_dUx; m_dWarp[0][1] = m_dUy; m_dWarp[0][2] = m_dU; m_dWarp[1][0] = m_dVx; m_dWarp[1][1] = 1 + m_dVy; m_dWarp[1][2] = m_dV; m_dWarp[2][0] = 0; m_dWarp[2][1] = 0; m_dWarp[2][2] = 1; // Initialize the Hessian matrix in subset R for (int k = 0; k < 6; k++) { for (int n = 0; n < 6; n++) { m_dHessian[k][n] = 0; } } // Initialize Subset R m_dSubAveR = 0; // R_m m_dSubNorR = 0; // T_m // Feed the gray intensity to subset R for (int l = 0; l < m_iSubsetH; l++) { for (int m = 0; m < m_iSubsetW; m++) { m_dSubsetR[l*m_iSubsetW+m] = input_mdR[int(input_dPXY[offset*2+0] - m_iSubsetY + l)*m_iWidth+int(input_dPXY[offset*2+1] - m_iSubsetX + m)]; m_dSubAveR += (m_dSubsetR[l*m_iSubsetW+m] / (m_iSubsetH * m_iSubsetW)); // Evaluate the Jacbian dW/dp at (x, 0); m_dJacobian[((l*m_iSubsetW+m)*2+0)*6+0] = 1; m_dJacobian[((l*m_iSubsetW+m)*2+0)*6+1] = m - m_iSubsetX; m_dJacobian[((l*m_iSubsetW+m)*2+0)*6+2] = l - m_iSubsetY; m_dJacobian[((l*m_iSubsetW+m)*2+0)*6+3] = 0; m_dJacobian[((l*m_iSubsetW+m)*2+0)*6+4] = 0; m_dJacobian[((l*m_iSubsetW+m)*2+0)*6+5] = 0; m_dJacobian[((l*m_iSubsetW+m)*2+1)*6+0] = 0; m_dJacobian[((l*m_iSubsetW+m)*2+1)*6+1] = 0; m_dJacobian[((l*m_iSubsetW+m)*2+1)*6+2] = 0; m_dJacobian[((l*m_iSubsetW+m)*2+1)*6+3] = 1; m_dJacobian[((l*m_iSubsetW+m)*2+1)*6+4] = m - m_iSubsetX; m_dJacobian[((l*m_iSubsetW+m)*2+1)*6+5] = l - m_iSubsetY; // Compute the steepest descent image DealtR*dW/dp for (int k = 0; k < 6; k++) { m_dRDescent[(l*m_iSubsetW+m)*6+k] = input_mdR[int(input_dPXY[offset*2+0] - m_iSubsetY + l)*m_iWidth+int(input_dPXY[offset*2+1] - m_iSubsetX + m)] * m_dJacobian[((l*m_iSubsetW+m)*2+0)*6+k] + input_mdRy[int(input_dPXY[offset*2+0] - m_iSubsetY + l)*m_iWidth+int(input_dPXY[offset*2+1] - m_iSubsetX + m)] * 
m_dJacobian[((l*m_iSubsetW+m)*2+1)*6+k]; } // Compute the Hessian matrix for (int k = 0; k < 6; k++) { for (int n = 0; n < 6; n++) { m_dHessianXY[((l*m_iSubsetW+m)*6+k)*6+n] = m_dRDescent[(l*m_iSubsetW+m)*6+k] * m_dRDescent[(l*m_iSubsetW+m)*6+n]; // Hessian matrix at each point m_dHessian[k][n] += m_dHessianXY[((l*m_iSubsetW+m)*6+k)*6+n]; // sum of Hessian matrix at all the points in subset R } } } } __syncthreads(); for (int l = 0; l < m_iSubsetH; l++) { for (int m = 0; m < m_iSubsetW; m++) { m_dSubsetAveR[l*m_iSubsetW+m] = m_dSubsetR[l*m_iSubsetW+m] - m_dSubAveR; // R_i - R_m m_dSubNorR += pow(m_dSubsetAveR[l*m_iSubsetW+m], 2); } } __syncthreads(); m_dSubNorR = sqrt(m_dSubNorR); // sqrt (Sigma(R_i - R_m)^2) // Invert the Hessian matrix (Gauss-Jordan algorithm) for (int l = 0; l < 6; l++) { for (int m = 0; m < 6; m++) { if (l == m) { m_dInvHessian[l][m] = 1; } else { m_dInvHessian[l][m] = 0; } } } __syncthreads(); for (int l = 0; l < 6; l++) { //Find pivot (maximum lth column element) in the rest (6-l) rows m_iTemp = l; for (int m = l + 1; m < 6; m++) { if (m_dHessian[m][l] > m_dHessian[m_iTemp][l]) { m_iTemp = m; } } // Swap the row which has maximum lth column element if (m_iTemp != l) { for (int k = 0; k < 6; k++) { m_dTemp = m_dHessian[l][k]; m_dHessian[l][k] = m_dHessian[m_iTemp][k]; m_dHessian[m_iTemp][k] = m_dTemp; m_dTemp = m_dInvHessian[l][k]; m_dInvHessian[l][k] = m_dInvHessian[m_iTemp][k]; m_dInvHessian[m_iTemp][k] = m_dTemp; } } __syncthreads(); // Perform row operation to form required identity matrix out of the Hessian matrix for (int m = 0; m < 6; m++) { m_dTemp = m_dHessian[m][l]; if (m != l) { for (int n = 0; n < 6; n++) { m_dInvHessian[m][n] -= m_dInvHessian[l][n] * m_dTemp / m_dHessian[l][l]; m_dHessian[m][n] -= m_dHessian[l][n] * m_dTemp / m_dHessian[l][l]; } } else { for (int n = 0; n < 6; n++) { m_dInvHessian[m][n] /= m_dTemp; m_dHessian[m][n] /= m_dTemp; } } } __syncthreads(); } // Initialize DeltaP, m_dDU = 0; m_dDUx = 0; m_dDUy = 0; m_dDV = 0; m_dDVx = 0; m_dDVy = 0; // Perform interative optimization, with pre-set maximum iteration step for (m_iIteration = 0; m_iIteration < m_iMaxiteration; m_iIteration++) { // Fill warpped image into Subset T m_dSubAveT = 0; m_dSubNorT = 0; for (int l = 0; l < m_iSubsetH; l++) { for (int m = 0; m < m_iSubsetW; m++) { // Calculate the location of warped subset T m_dWarpX = input_dPXY[offset*2+1] + m_dWarp[0][0] * (m - m_iSubsetX) + m_dWarp[0][1] * (l - m_iSubsetY) + m_dWarp[0][2]; m_dWarpY = input_dPXY[offset*2+0] + m_dWarp[1][0] * (m - m_iSubsetX) + m_dWarp[1][1] * (l - m_iSubsetY) + m_dWarp[1][2]; m_iTempX = int(m_dWarpX); m_iTempY = int(m_dWarpY); m_dTempX = m_dWarpX - m_iTempX; m_dTempY = m_dWarpY - m_iTempY; // if it is integer-pixel location, feed the gray intensity of T into the subset T if ((m_dTempX == 0) && (m_dTempY == 0)) { m_dSubsetT[l*m_iSubsetW+m] = input_mdT[m_iTempY*m_iSubsetW+m_iTempX]; } else { // If it is sub-pixel location, estimate the gary intensity using interpolation m_dSubsetT[l*m_iSubsetW+m] = 0; for (int k = 0; k < 4; k++) { for (int n = 0; n < 4; n++) { m_dSubsetT[l*m_iSubsetW+m] += input_mBicubic[((m_iTempY*m_iWidth+m_iTempX)*4+k)*4+n] * pow(m_dTempY, k) * pow(m_dTempX, n); } } } m_dSubAveT += (m_dSubsetT[l*m_iWidth+m] / (m_iSubsetH * m_iSubsetW)); } } for (int l = 0; l < m_iSubsetH; l++) { for (int m = 0; m < m_iSubsetW; m++) { m_dSubsetAveT[l*m_iSubsetW+m] = m_dSubsetT[l*m_iSubsetW+m] - m_dSubAveT; // T_i - T_m m_dSubNorT += pow(m_dSubsetAveT[l*m_iSubsetW+m], 2); } } m_dSubNorT = sqrt(m_dSubNorT); 
// sqrt (Sigma(T_i - T_m)^2) // Compute the error image for (int k = 0; k < 6; k++) { m_dNumerator[k] = 0; } for (int l = 0; l < m_iSubsetH; l++) { for (int m = 0; m < m_iSubsetW; m++) { m_dError[l*m_iSubsetW+m] = (m_dSubNorR / m_dSubNorT) * m_dSubsetAveT[l*m_iSubsetW+m] - m_dSubsetAveR[l*m_iSubsetW+m]; // Compute the numerator for (int k = 0; k < 6; k++) { m_dNumerator[k] += (m_dRDescent[(l*m_iSubsetW+m)*6+k] * m_dError[l*m_iSubsetW+m]); } } } // Compute DeltaP for (int k = 0; k < 6; k++) { m_dDP[offset*6+k] = 0; for (int n = 0; n < 6; n++) { m_dDP[offset*6+k] += (m_dInvHessian[k][n] * m_dNumerator[n]); } } m_dDU = m_dDP[offset*6+0]; m_dDUx = m_dDP[offset*6+1]; m_dDUy = m_dDP[offset*6+2]; m_dDV = m_dDP[offset*6+3]; m_dDVx = m_dDP[offset*6+4]; m_dDVy = m_dDP[offset*6+5]; // Update the warp m_dTemp = (1 + m_dDUx) * (1 + m_dDVy) - m_dDUy * m_dDVx; //W(P) <- W(P) o W(DP)^-1 m_dWarp[0][0] = ((1 + m_dUx) * (1 + m_dDVy) - m_dUy * m_dDVx) / m_dTemp; m_dWarp[0][1] = (m_dUy * (1 + m_dDUx) - (1 + m_dUx) * m_dDUy) / m_dTemp; m_dWarp[0][2] = m_dU + (m_dUy * (m_dDU * m_dDVx - m_dDV - m_dDV * m_dDUx) - (1 + m_dUx) * (m_dDU * m_dDVy + m_dDU - m_dDUy * m_dDV)) / m_dTemp; m_dWarp[1][0] = (m_dVx * (1 + m_dDVy) - (1 + m_dVy) * m_dDVx) / m_dTemp; m_dWarp[1][1] = ((1 + m_dVy) * (1 + m_dDUx) - m_dVx * m_dDUy) / m_dTemp; m_dWarp[1][2] = m_dV + ((1 + m_dVy) * (m_dDU * m_dDVx - m_dDV - m_dDV * m_dDUx) - m_dVx * (m_dDU * m_dDVy + m_dDU - m_dDUy * m_dDV)) / m_dTemp; m_dWarp[2][0] = 0; m_dWarp[2][1] = 0; m_dWarp[2][2] = 1; // Update DeltaP output_dP[offset*6+0] = m_dWarp[0][2]; output_dP[offset*6+1] = m_dWarp[0][0] - 1; output_dP[offset*6+2] = m_dWarp[0][1]; output_dP[offset*6+3] = m_dWarp[1][2]; output_dP[offset*6+4] = m_dWarp[1][0]; output_dP[offset*6+5] = m_dWarp[1][1] - 1; m_dU = output_dP[offset*6+0]; m_dUx = output_dP[offset*6+1]; m_dUy = output_dP[offset*6+2]; m_dV = output_dP[offset*6+3]; m_dVx = output_dP[offset*6+4]; m_dVy =output_dP[offset*6+5]; //Check if the norm of DeltaP is small enough if (sqrt(pow(m_dDP[(row*m_iNumberX+col)*6+0], 2) + pow(m_dDP[(row*m_iNumberX+col)*6+1] * m_iSubsetX, 2) + pow(m_dDP[offset*6+2] * m_iSubsetY, 2) + pow(m_dDP[offset*6+3], 2) + pow(m_dDP[offset*6+4] * m_iSubsetX, 2) + pow(m_dDP[offset*6+5] * m_iSubsetY, 2)) < m_dNormDeltaP) { break; } } m_iIterationNum[row*m_iNumberX+col] = m_iIteration; // save iteration steps taken at this POI } } void launch_ICGN(const float* input_dPXY, const float* input_mdR, const float* input_mdRx, const float* input_mdRy, const float& m_dNormDeltaP, const float* input_mdT, const float* input_mBicubic, const int* input_iU, const int* input_iV, const int& m_iNumberY, const int& m_iNumberX, const int& m_iSubsetH, const int& m_iSubsetW, const int& m_iWidth, const int& m_iHeight, const int& m_iSubsetY, const int& m_iSubsetX, const int& m_iMaxiteration, float* output_dP, int* m_iIterationNum, float& time) { StopWatchWin icgn; float *dm_dSubsetR, *dm_dSubsetT, *dm_dJacobian, *dm_dRdescent, *dm_dHessianXY, *dm_dSubsetAveR, *dm_dSubsetAveT, *dm_dError, *dm_dDP; hipMalloc((void**)&dm_dSubsetR, m_iSubsetH*m_iSubsetW*sizeof(float)); hipMalloc((void**)&dm_dSubsetT, m_iSubsetH*m_iSubsetW*sizeof(float)); hipMalloc((void**)&dm_dJacobian, m_iSubsetH*m_iSubsetW*2*6*sizeof(float)); hipMalloc((void**)&dm_dRdescent, m_iSubsetH*m_iSubsetW*6*sizeof(float)); hipMalloc((void**)&dm_dHessianXY, m_iSubsetH*m_iSubsetW*6*6*sizeof(float)); hipMalloc((void**)&dm_dSubsetAveR, m_iSubsetH*m_iSubsetW*sizeof(float)); hipMalloc((void**)&dm_dSubsetAveT, 
m_iSubsetH*m_iSubsetW*sizeof(float)); hipMalloc((void**)&dm_dError, m_iSubsetH*m_iSubsetW*sizeof(float)); hipMalloc((void**)&dm_dDP, m_iNumberX*m_iNumberY*6*sizeof(float)); dim3 dimGrid((m_iNumberY-1)/16+1, (m_iNumberX-1)/16+1,1); dim3 dimBlock(16, 16,1); icgn.start(); hipLaunchKernelGGL(( computeICGN), dim3(dimGrid),dim3(dimBlock), 0, 0, input_dPXY, input_mdR, input_mdRx, input_mdRy, m_dNormDeltaP, input_mdT, input_mBicubic, input_iU, input_iV, m_iNumberY, m_iNumberX, m_iSubsetH, m_iSubsetW, m_iWidth, m_iHeight, m_iSubsetY, m_iSubsetX, m_iMaxiteration, output_dP,m_iIterationNum, dm_dSubsetR, dm_dSubsetT, dm_dJacobian,dm_dRdescent,dm_dHessianXY,dm_dSubsetAveR,dm_dSubsetAveT,dm_dError,dm_dDP); icgn.stop(); time = icgn.getTime(); hipFree(dm_dSubsetR); hipFree(dm_dSubsetT); hipFree(dm_dJacobian); hipFree(dm_dRdescent); hipFree(dm_dHessianXY); hipFree(dm_dSubsetAveR); hipFree(dm_dSubsetAveT); hipFree(dm_dError); hipFree(dm_dDP); }
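/* Note on scratch sizing: launch_ICGN() above allocates dm_dSubsetR, dm_dSubsetT,
 * dm_dJacobian and the other work buffers at the size of a single subset, while
 * computeICGN() is launched with one thread per POI and every thread writes
 * m_dSubsetR[l*m_iSubsetW+m] (and the other buffers) with no per-POI offset, so
 * concurrently running POIs share the same scratch memory. A per-POI layout would size
 * each buffer by the POI count and give every thread its own slice; a sketch, assuming
 * the kernel indexing is extended with the same offset (illustration only, not a
 * drop-in change):
 *
 *     // host side: one subset-sized slice per POI
 *     size_t subsetSize = (size_t)m_iSubsetH * m_iSubsetW;
 *     size_t numPOI     = (size_t)m_iNumberX * m_iNumberY;
 *     hipMalloc((void**)&dm_dSubsetR, numPOI * subsetSize * sizeof(float));
 *
 *     // device side, inside computeICGN(): base every access on this POI's slice
 *     float *mySubsetR = m_dSubsetR + (size_t)offset * subsetSize;
 *     mySubsetR[l*m_iSubsetW + m] = ...;
 */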
5fc060817b2404599b6b5d9a6db9cf63dffed841.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "helper_cuda.h" #include "helper_functions.h" #include "IC_GN.cuh" #include <stdio.h> __global__ void computeICGN(const float* input_dPXY, const float* input_mdR, const float* input_mdRx, const float* input_mdRy, float m_dNormDeltaP, const float* input_mdT, const float* input_mBicubic, const int* input_iU, const int* input_iV, int m_iNumberY, int m_iNumberX, int m_iSubsetH, int m_iSubsetW, int m_iWidth, int m_iHeight, int m_iSubsetY, int m_iSubsetX, int m_iMaxiteration, float* output_dP, int* m_iIterationNum, float* m_dSubsetR, float* m_dSubsetT, float* m_dJacobian, float* m_dRDescent, float* m_dHessianXY,float* m_dSubsetAveR, float* m_dSubsetAveT, float* m_dError, float* m_dDP) /*Input: all the const variables Output: deformation P matrix Strategy: Each thread computes one of the 21*21 POIs */ { unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; unsigned int row = blockIdx.y * blockDim.y + threadIdx.y; unsigned int offset = row*m_iNumberX + col; float m_dU, m_dV, m_dUx, m_dUy, m_dVx, m_dVy; float m_dDU, m_dDV, m_dDUx, m_dDUy, m_dDVx, m_dDVy; float m_dSubAveR,m_dSubNorR, m_dSubAveT,m_dSubNorT; float m_dWarp[3][3], m_dHessian[6][6], m_dInvHessian[6][6]; float m_dNumerator[6]; float m_dTemp; int m_iTemp; int m_iIteration; float m_dWarpX, m_dWarpY; int m_iTempX,m_iTempY; float m_dTempX, m_dTempY; if(row<m_iNumberY && col<m_iNumberX){ m_dU = input_iU[offset]; m_dV = input_iV[offset]; m_dUx = 0; m_dUy = 0; m_dVx = 0; m_dVy = 0; output_dP[offset*6+0] = m_dU; output_dP[offset*6+1] = m_dUx; output_dP[offset*6+2] = m_dUy; output_dP[offset*6+3] = m_dV; output_dP[offset*6+4] = m_dVx; output_dP[offset*6+5] = m_dVy; // Initialize the warp matrix m_dWarp[0][0] = 1 + m_dUx; m_dWarp[0][1] = m_dUy; m_dWarp[0][2] = m_dU; m_dWarp[1][0] = m_dVx; m_dWarp[1][1] = 1 + m_dVy; m_dWarp[1][2] = m_dV; m_dWarp[2][0] = 0; m_dWarp[2][1] = 0; m_dWarp[2][2] = 1; // Initialize the Hessian matrix in subset R for (int k = 0; k < 6; k++) { for (int n = 0; n < 6; n++) { m_dHessian[k][n] = 0; } } // Initialize Subset R m_dSubAveR = 0; // R_m m_dSubNorR = 0; // T_m // Feed the gray intensity to subset R for (int l = 0; l < m_iSubsetH; l++) { for (int m = 0; m < m_iSubsetW; m++) { m_dSubsetR[l*m_iSubsetW+m] = input_mdR[int(input_dPXY[offset*2+0] - m_iSubsetY + l)*m_iWidth+int(input_dPXY[offset*2+1] - m_iSubsetX + m)]; m_dSubAveR += (m_dSubsetR[l*m_iSubsetW+m] / (m_iSubsetH * m_iSubsetW)); // Evaluate the Jacbian dW/dp at (x, 0); m_dJacobian[((l*m_iSubsetW+m)*2+0)*6+0] = 1; m_dJacobian[((l*m_iSubsetW+m)*2+0)*6+1] = m - m_iSubsetX; m_dJacobian[((l*m_iSubsetW+m)*2+0)*6+2] = l - m_iSubsetY; m_dJacobian[((l*m_iSubsetW+m)*2+0)*6+3] = 0; m_dJacobian[((l*m_iSubsetW+m)*2+0)*6+4] = 0; m_dJacobian[((l*m_iSubsetW+m)*2+0)*6+5] = 0; m_dJacobian[((l*m_iSubsetW+m)*2+1)*6+0] = 0; m_dJacobian[((l*m_iSubsetW+m)*2+1)*6+1] = 0; m_dJacobian[((l*m_iSubsetW+m)*2+1)*6+2] = 0; m_dJacobian[((l*m_iSubsetW+m)*2+1)*6+3] = 1; m_dJacobian[((l*m_iSubsetW+m)*2+1)*6+4] = m - m_iSubsetX; m_dJacobian[((l*m_iSubsetW+m)*2+1)*6+5] = l - m_iSubsetY; // Compute the steepest descent image DealtR*dW/dp for (int k = 0; k < 6; k++) { m_dRDescent[(l*m_iSubsetW+m)*6+k] = input_mdR[int(input_dPXY[offset*2+0] - m_iSubsetY + l)*m_iWidth+int(input_dPXY[offset*2+1] - m_iSubsetX + m)] * m_dJacobian[((l*m_iSubsetW+m)*2+0)*6+k] + input_mdRy[int(input_dPXY[offset*2+0] - m_iSubsetY + l)*m_iWidth+int(input_dPXY[offset*2+1] - m_iSubsetX + m)] * m_dJacobian[((l*m_iSubsetW+m)*2+1)*6+k]; } // Compute the Hessian 
matrix for (int k = 0; k < 6; k++) { for (int n = 0; n < 6; n++) { m_dHessianXY[((l*m_iSubsetW+m)*6+k)*6+n] = m_dRDescent[(l*m_iSubsetW+m)*6+k] * m_dRDescent[(l*m_iSubsetW+m)*6+n]; // Hessian matrix at each point m_dHessian[k][n] += m_dHessianXY[((l*m_iSubsetW+m)*6+k)*6+n]; // sum of Hessian matrix at all the points in subset R } } } } __syncthreads(); for (int l = 0; l < m_iSubsetH; l++) { for (int m = 0; m < m_iSubsetW; m++) { m_dSubsetAveR[l*m_iSubsetW+m] = m_dSubsetR[l*m_iSubsetW+m] - m_dSubAveR; // R_i - R_m m_dSubNorR += pow(m_dSubsetAveR[l*m_iSubsetW+m], 2); } } __syncthreads(); m_dSubNorR = sqrt(m_dSubNorR); // sqrt (Sigma(R_i - R_m)^2) // Invert the Hessian matrix (Gauss-Jordan algorithm) for (int l = 0; l < 6; l++) { for (int m = 0; m < 6; m++) { if (l == m) { m_dInvHessian[l][m] = 1; } else { m_dInvHessian[l][m] = 0; } } } __syncthreads(); for (int l = 0; l < 6; l++) { //Find pivot (maximum lth column element) in the rest (6-l) rows m_iTemp = l; for (int m = l + 1; m < 6; m++) { if (m_dHessian[m][l] > m_dHessian[m_iTemp][l]) { m_iTemp = m; } } // Swap the row which has maximum lth column element if (m_iTemp != l) { for (int k = 0; k < 6; k++) { m_dTemp = m_dHessian[l][k]; m_dHessian[l][k] = m_dHessian[m_iTemp][k]; m_dHessian[m_iTemp][k] = m_dTemp; m_dTemp = m_dInvHessian[l][k]; m_dInvHessian[l][k] = m_dInvHessian[m_iTemp][k]; m_dInvHessian[m_iTemp][k] = m_dTemp; } } __syncthreads(); // Perform row operation to form required identity matrix out of the Hessian matrix for (int m = 0; m < 6; m++) { m_dTemp = m_dHessian[m][l]; if (m != l) { for (int n = 0; n < 6; n++) { m_dInvHessian[m][n] -= m_dInvHessian[l][n] * m_dTemp / m_dHessian[l][l]; m_dHessian[m][n] -= m_dHessian[l][n] * m_dTemp / m_dHessian[l][l]; } } else { for (int n = 0; n < 6; n++) { m_dInvHessian[m][n] /= m_dTemp; m_dHessian[m][n] /= m_dTemp; } } } __syncthreads(); } // Initialize DeltaP, m_dDU = 0; m_dDUx = 0; m_dDUy = 0; m_dDV = 0; m_dDVx = 0; m_dDVy = 0; // Perform interative optimization, with pre-set maximum iteration step for (m_iIteration = 0; m_iIteration < m_iMaxiteration; m_iIteration++) { // Fill warpped image into Subset T m_dSubAveT = 0; m_dSubNorT = 0; for (int l = 0; l < m_iSubsetH; l++) { for (int m = 0; m < m_iSubsetW; m++) { // Calculate the location of warped subset T m_dWarpX = input_dPXY[offset*2+1] + m_dWarp[0][0] * (m - m_iSubsetX) + m_dWarp[0][1] * (l - m_iSubsetY) + m_dWarp[0][2]; m_dWarpY = input_dPXY[offset*2+0] + m_dWarp[1][0] * (m - m_iSubsetX) + m_dWarp[1][1] * (l - m_iSubsetY) + m_dWarp[1][2]; m_iTempX = int(m_dWarpX); m_iTempY = int(m_dWarpY); m_dTempX = m_dWarpX - m_iTempX; m_dTempY = m_dWarpY - m_iTempY; // if it is integer-pixel location, feed the gray intensity of T into the subset T if ((m_dTempX == 0) && (m_dTempY == 0)) { m_dSubsetT[l*m_iSubsetW+m] = input_mdT[m_iTempY*m_iSubsetW+m_iTempX]; } else { // If it is sub-pixel location, estimate the gary intensity using interpolation m_dSubsetT[l*m_iSubsetW+m] = 0; for (int k = 0; k < 4; k++) { for (int n = 0; n < 4; n++) { m_dSubsetT[l*m_iSubsetW+m] += input_mBicubic[((m_iTempY*m_iWidth+m_iTempX)*4+k)*4+n] * pow(m_dTempY, k) * pow(m_dTempX, n); } } } m_dSubAveT += (m_dSubsetT[l*m_iWidth+m] / (m_iSubsetH * m_iSubsetW)); } } for (int l = 0; l < m_iSubsetH; l++) { for (int m = 0; m < m_iSubsetW; m++) { m_dSubsetAveT[l*m_iSubsetW+m] = m_dSubsetT[l*m_iSubsetW+m] - m_dSubAveT; // T_i - T_m m_dSubNorT += pow(m_dSubsetAveT[l*m_iSubsetW+m], 2); } } m_dSubNorT = sqrt(m_dSubNorT); // sqrt (Sigma(T_i - T_m)^2) // Compute the error image for (int k 
= 0; k < 6; k++) { m_dNumerator[k] = 0; } for (int l = 0; l < m_iSubsetH; l++) { for (int m = 0; m < m_iSubsetW; m++) { m_dError[l*m_iSubsetW+m] = (m_dSubNorR / m_dSubNorT) * m_dSubsetAveT[l*m_iSubsetW+m] - m_dSubsetAveR[l*m_iSubsetW+m]; // Compute the numerator for (int k = 0; k < 6; k++) { m_dNumerator[k] += (m_dRDescent[(l*m_iSubsetW+m)*6+k] * m_dError[l*m_iSubsetW+m]); } } } // Compute DeltaP for (int k = 0; k < 6; k++) { m_dDP[offset*6+k] = 0; for (int n = 0; n < 6; n++) { m_dDP[offset*6+k] += (m_dInvHessian[k][n] * m_dNumerator[n]); } } m_dDU = m_dDP[offset*6+0]; m_dDUx = m_dDP[offset*6+1]; m_dDUy = m_dDP[offset*6+2]; m_dDV = m_dDP[offset*6+3]; m_dDVx = m_dDP[offset*6+4]; m_dDVy = m_dDP[offset*6+5]; // Update the warp m_dTemp = (1 + m_dDUx) * (1 + m_dDVy) - m_dDUy * m_dDVx; //W(P) <- W(P) o W(DP)^-1 m_dWarp[0][0] = ((1 + m_dUx) * (1 + m_dDVy) - m_dUy * m_dDVx) / m_dTemp; m_dWarp[0][1] = (m_dUy * (1 + m_dDUx) - (1 + m_dUx) * m_dDUy) / m_dTemp; m_dWarp[0][2] = m_dU + (m_dUy * (m_dDU * m_dDVx - m_dDV - m_dDV * m_dDUx) - (1 + m_dUx) * (m_dDU * m_dDVy + m_dDU - m_dDUy * m_dDV)) / m_dTemp; m_dWarp[1][0] = (m_dVx * (1 + m_dDVy) - (1 + m_dVy) * m_dDVx) / m_dTemp; m_dWarp[1][1] = ((1 + m_dVy) * (1 + m_dDUx) - m_dVx * m_dDUy) / m_dTemp; m_dWarp[1][2] = m_dV + ((1 + m_dVy) * (m_dDU * m_dDVx - m_dDV - m_dDV * m_dDUx) - m_dVx * (m_dDU * m_dDVy + m_dDU - m_dDUy * m_dDV)) / m_dTemp; m_dWarp[2][0] = 0; m_dWarp[2][1] = 0; m_dWarp[2][2] = 1; // Update DeltaP output_dP[offset*6+0] = m_dWarp[0][2]; output_dP[offset*6+1] = m_dWarp[0][0] - 1; output_dP[offset*6+2] = m_dWarp[0][1]; output_dP[offset*6+3] = m_dWarp[1][2]; output_dP[offset*6+4] = m_dWarp[1][0]; output_dP[offset*6+5] = m_dWarp[1][1] - 1; m_dU = output_dP[offset*6+0]; m_dUx = output_dP[offset*6+1]; m_dUy = output_dP[offset*6+2]; m_dV = output_dP[offset*6+3]; m_dVx = output_dP[offset*6+4]; m_dVy =output_dP[offset*6+5]; //Check if the norm of DeltaP is small enough if (sqrt(pow(m_dDP[(row*m_iNumberX+col)*6+0], 2) + pow(m_dDP[(row*m_iNumberX+col)*6+1] * m_iSubsetX, 2) + pow(m_dDP[offset*6+2] * m_iSubsetY, 2) + pow(m_dDP[offset*6+3], 2) + pow(m_dDP[offset*6+4] * m_iSubsetX, 2) + pow(m_dDP[offset*6+5] * m_iSubsetY, 2)) < m_dNormDeltaP) { break; } } m_iIterationNum[row*m_iNumberX+col] = m_iIteration; // save iteration steps taken at this POI } } void launch_ICGN(const float* input_dPXY, const float* input_mdR, const float* input_mdRx, const float* input_mdRy, const float& m_dNormDeltaP, const float* input_mdT, const float* input_mBicubic, const int* input_iU, const int* input_iV, const int& m_iNumberY, const int& m_iNumberX, const int& m_iSubsetH, const int& m_iSubsetW, const int& m_iWidth, const int& m_iHeight, const int& m_iSubsetY, const int& m_iSubsetX, const int& m_iMaxiteration, float* output_dP, int* m_iIterationNum, float& time) { StopWatchWin icgn; float *dm_dSubsetR, *dm_dSubsetT, *dm_dJacobian, *dm_dRdescent, *dm_dHessianXY, *dm_dSubsetAveR, *dm_dSubsetAveT, *dm_dError, *dm_dDP; cudaMalloc((void**)&dm_dSubsetR, m_iSubsetH*m_iSubsetW*sizeof(float)); cudaMalloc((void**)&dm_dSubsetT, m_iSubsetH*m_iSubsetW*sizeof(float)); cudaMalloc((void**)&dm_dJacobian, m_iSubsetH*m_iSubsetW*2*6*sizeof(float)); cudaMalloc((void**)&dm_dRdescent, m_iSubsetH*m_iSubsetW*6*sizeof(float)); cudaMalloc((void**)&dm_dHessianXY, m_iSubsetH*m_iSubsetW*6*6*sizeof(float)); cudaMalloc((void**)&dm_dSubsetAveR, m_iSubsetH*m_iSubsetW*sizeof(float)); cudaMalloc((void**)&dm_dSubsetAveT, m_iSubsetH*m_iSubsetW*sizeof(float)); cudaMalloc((void**)&dm_dError, 
m_iSubsetH*m_iSubsetW*sizeof(float)); cudaMalloc((void**)&dm_dDP, m_iNumberX*m_iNumberY*6*sizeof(float)); dim3 dimGrid((m_iNumberY-1)/16+1, (m_iNumberX-1)/16+1,1); dim3 dimBlock(16, 16,1); icgn.start(); computeICGN<<<dimGrid,dimBlock>>>(input_dPXY, input_mdR, input_mdRx, input_mdRy, m_dNormDeltaP, input_mdT, input_mBicubic, input_iU, input_iV, m_iNumberY, m_iNumberX, m_iSubsetH, m_iSubsetW, m_iWidth, m_iHeight, m_iSubsetY, m_iSubsetX, m_iMaxiteration, output_dP,m_iIterationNum, dm_dSubsetR, dm_dSubsetT, dm_dJacobian,dm_dRdescent,dm_dHessianXY,dm_dSubsetAveR,dm_dSubsetAveT,dm_dError,dm_dDP); icgn.stop(); time = icgn.getTime(); cudaFree(dm_dSubsetR); cudaFree(dm_dSubsetT); cudaFree(dm_dJacobian); cudaFree(dm_dRdescent); cudaFree(dm_dHessianXY); cudaFree(dm_dSubsetAveR); cudaFree(dm_dSubsetAveT); cudaFree(dm_dError); cudaFree(dm_dDP); }
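The expanded warp-update expressions inside computeICGN above are easy to mistype, so here is a minimal, self-contained cross-check of the same inverse-compositional step, W(P) <- W(P) o W(DP)^-1, written with an explicit 3x3 inverse and matrix product. This sketch is not part of the original source; the helper name composeInverseCompositionalWarp and the p[6] = {u, ux, uy, v, vx, vy} ordering (chosen to match output_dP) are assumptions for illustration only.

__host__ __device__ inline void composeInverseCompositionalWarp(const float p[6], const float dp[6], float pNew[6])
{
  // W(p) as a 3x3 affine matrix: [1+ux, uy, u; vx, 1+vy, v; 0, 0, 1]
  const float W[3][3] = { {1.0f + p[1], p[2],        p[0]},
                          {p[4],        1.0f + p[5], p[3]},
                          {0.0f,        0.0f,        1.0f} };
  // Closed-form inverse of W(dp); det corresponds to m_dTemp in the kernel above
  const float det = (1.0f + dp[1]) * (1.0f + dp[5]) - dp[2] * dp[4];
  const float Winv[3][3] = {
    { (1.0f + dp[5]) / det, -dp[2] / det, (-(1.0f + dp[5]) * dp[0] + dp[2] * dp[3]) / det },
    { -dp[4] / det, (1.0f + dp[1]) / det, ( dp[4] * dp[0] - (1.0f + dp[1]) * dp[3]) / det },
    { 0.0f, 0.0f, 1.0f } };
  // W(p) <- W(p) o W(dp)^-1, then read the updated parameters back out of the composed matrix
  float Wnew[3][3];
  for (int r = 0; r < 3; r++)
  {
    for (int c = 0; c < 3; c++)
    {
      Wnew[r][c] = W[r][0] * Winv[0][c] + W[r][1] * Winv[1][c] + W[r][2] * Winv[2][c];
    }
  }
  pNew[0] = Wnew[0][2];         // u
  pNew[1] = Wnew[0][0] - 1.0f;  // ux
  pNew[2] = Wnew[0][1];         // uy
  pNew[3] = Wnew[1][2];         // v
  pNew[4] = Wnew[1][0];         // vx
  pNew[5] = Wnew[1][1] - 1.0f;  // vy
}

Expanding the product symbolically reproduces the per-element expressions used in the kernel, e.g. Wnew[0][0] = ((1 + ux) * (1 + dvy) - uy * dvx) / det.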
308542782fe50a9577b9ae7375f9d88de486cba6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define PROCESSING_ON_GPU #include "../../SharedFuncts/SharedBPProcessingFuncts.h" #include "../../bpStereoCudaParameters.h" #undef PROCESSING_ON_GPU template<typename T, typename U> __device__ inline void msgStereo(int xVal, int yVal, levelProperties& currentLevelProperties, T* messageValsNeighbor1Shared, T* messageValsNeighbor2Shared, T* messageValsNeighbor3Shared, T* dataCostsShared, T* messageValsNeighbor1, T* messageValsNeighbor2, T* messageValsNeighbor3, T* dataCosts, T* dstMessageArray, U disc_k_bp, bool dataAligned) { printf("Data type not supported\n"); } template<> __device__ inline void msgStereo<half, half>(int xVal, int yVal, levelProperties& currentLevelProperties, half* messageValsNeighbor1Shared, half* messageValsNeighbor2Shared, half* messageValsNeighbor3Shared, half* dataCostsShared, half* messageValsNeighbor1, half* messageValsNeighbor2, half* messageValsNeighbor3, half* dataCosts, half* dstMessageArray, half disc_k_bp, bool dataAligned) { //printf("USED SHARED MEMORY\n"); // aggregate and find min half minimum = INF_BP; int startIndexDstShared = 2*(threadIdx.y * BLOCK_SIZE_WIDTH_BP + threadIdx.x); int indexIndexDstShared = startIndexDstShared; int halfIndexSharedVals[2] = {1, (2*BLOCK_SIZE_WIDTH_BP * BLOCK_SIZE_HEIGHT_BP)-1}; int indexIntervalNextHalfIndexSharedVals = 0; half dst[NUM_POSSIBLE_DISPARITY_VALUES]; //#pragma unroll 64 for (int currentDisparity = 0; currentDisparity < DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity++) { dst[currentDisparity] = messageValsNeighbor1Shared[indexIndexDstShared] + messageValsNeighbor2Shared[indexIndexDstShared] + messageValsNeighbor3Shared[indexIndexDstShared] + dataCostsShared[indexIndexDstShared]; if (dst[currentDisparity] < minimum) { minimum = dst[currentDisparity]; } indexIndexDstShared += halfIndexSharedVals[indexIntervalNextHalfIndexSharedVals]; indexIntervalNextHalfIndexSharedVals = !indexIntervalNextHalfIndexSharedVals; } //#pragma unroll 64 for (int currentDisparity = DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = messageValsNeighbor1[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] + messageValsNeighbor2[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] + messageValsNeighbor3[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] + dataCosts[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM]; if (dst[currentDisparity] < minimum) { minimum = dst[currentDisparity]; } } //retrieve the minimum value at each disparity in O(n) time using Felzenszwalb's method (see "Efficient Belief Propagation for Early Vision") //#if (NUM_POSSIBLE_DISPARITY_VALUES - 1) <= DISPARITY_START_SHARED_MEM //no shared memory used // dtStereo<float>(dst); //#else dtStereo<half>(dst); //#endif // truncate minimum += disc_k_bp; // normalize half valToNormalize = 0.0f; //#pragma unroll 64 for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { if (minimum < dst[currentDisparity]) { dst[currentDisparity] = minimum; } valToNormalize += dst[currentDisparity]; } valToNormalize /= ((half) NUM_POSSIBLE_DISPARITY_VALUES); int destMessageArrayIndex = retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, 0, NUM_POSSIBLE_DISPARITY_VALUES); //#pragma unroll 64 for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { 
dst[currentDisparity] -= valToNormalize; dstMessageArray[destMessageArrayIndex] = dst[currentDisparity]; #if OPTIMIZED_INDEXING_SETTING == 1 destMessageArrayIndex += currentLevelProperties.paddedWidthCheckerboardLevel; #else destMessageArrayIndex++; #endif //OPTIMIZED_INDEXING_SETTING == 1 } } template<> __device__ inline void msgStereo<float, float>(int xVal, int yVal, levelProperties& currentLevelProperties, float* messageValsNeighbor1Shared, float* messageValsNeighbor2Shared, float* messageValsNeighbor3Shared, float* dataCostsShared, float* messageValsNeighbor1, float* messageValsNeighbor2, float* messageValsNeighbor3, float* dataCosts, float* dstMessageArray, float disc_k_bp, bool dataAligned) { //printf("USED SHARED MEMORY\n"); // aggregate and find min float minimum = INF_BP; int startIndexDstShared = threadIdx.y * BLOCK_SIZE_WIDTH_BP + threadIdx.x; int indexIndexDstShared = startIndexDstShared; float dst[NUM_POSSIBLE_DISPARITY_VALUES]; //#pragma unroll 64 for (int currentDisparity = 0; currentDisparity < DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity++) { dst[currentDisparity] = messageValsNeighbor1Shared[indexIndexDstShared] + messageValsNeighbor2Shared[indexIndexDstShared] + messageValsNeighbor3Shared[indexIndexDstShared] + dataCostsShared[indexIndexDstShared]; if (dst[currentDisparity] < minimum) { minimum = dst[currentDisparity]; } indexIndexDstShared += BLOCK_SIZE_WIDTH_BP * BLOCK_SIZE_HEIGHT_BP; } //#pragma unroll 64 for (int currentDisparity = DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = messageValsNeighbor1[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] + messageValsNeighbor2[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] + messageValsNeighbor3[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] + dataCosts[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM]; if (dst[currentDisparity] < minimum) { minimum = dst[currentDisparity]; } } //retrieve the minimum value at each disparity in O(n) time using Felzenszwalb's method (see "Efficient Belief Propagation for Early Vision") //#if (NUM_POSSIBLE_DISPARITY_VALUES - 1) <= DISPARITY_START_SHARED_MEM //no shared memory used // dtStereo<float>(dst); //#else dtStereo<float>(dst); //#endif // truncate minimum += disc_k_bp; // normalize float valToNormalize = 0.0f; //#pragma unroll 64 for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { if (minimum < dst[currentDisparity]) { dst[currentDisparity] = minimum; } valToNormalize += dst[currentDisparity]; } valToNormalize /= ((float) NUM_POSSIBLE_DISPARITY_VALUES); int destMessageArrayIndex = retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, 0, NUM_POSSIBLE_DISPARITY_VALUES); //#pragma unroll 64 for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] -= valToNormalize; dstMessageArray[destMessageArrayIndex] = dst[currentDisparity]; #if OPTIMIZED_INDEXING_SETTING == 1 destMessageArrayIndex += currentLevelProperties.paddedWidthCheckerboardLevel; #else destMessageArrayIndex++; #endif //OPTIMIZED_INDEXING_SETTING == 1 } } template<typename T, typename U> ARCHITECTURE_ADDITION inline void runBPIterationInOutDataInLocalMem(int xVal, int yVal, levelProperties& currentLevelProperties, T* prevUMessageShared, T* prevDMessageShared, T* prevLMessageShared, T* prevRMessageShared, T* dataMessageShared, T* prevUMessage, 
T* prevDMessage, T* prevLMessage, T* prevRMessage, T* dataMessage, T* currentUMessageArray, T* currentDMessageArray, T* currentLMessageArray, T* currentRMessageArray, U disc_k_bp, bool dataAligned) { msgStereo<T, U>(xVal, yVal, currentLevelProperties, prevUMessageShared, prevLMessageShared, prevRMessageShared, dataMessageShared, prevUMessage, prevLMessage, prevRMessage, dataMessage, currentUMessageArray, disc_k_bp, dataAligned); msgStereo<T, U>(xVal, yVal, currentLevelProperties, prevDMessageShared, prevLMessageShared, prevRMessageShared, dataMessageShared, prevDMessage, prevLMessage, prevRMessage, dataMessage, currentDMessageArray, disc_k_bp, dataAligned); msgStereo<T, U>(xVal, yVal, currentLevelProperties, prevUMessageShared, prevDMessageShared, prevRMessageShared, dataMessageShared, prevUMessage, prevDMessage, prevRMessage, dataMessage, currentRMessageArray, disc_k_bp, dataAligned); msgStereo<T, U>(xVal, yVal, currentLevelProperties, prevUMessageShared, prevDMessageShared, prevLMessageShared, dataMessageShared, prevUMessage, prevDMessage, prevLMessage, dataMessage, currentLMessageArray, disc_k_bp, dataAligned); } #if CURRENT_DATA_TYPE_PROCESSING_FROM_PYTHON == DATA_TYPE_PROCESSING_FLOAT //device portion of the kernal function to run the current iteration of belief propagation in parallel using the checkerboard update method where half the pixels in the //"checkerboard" scheme retrieve messages from each 4-connected neighbor and then update their message based on the retrieved messages and the data cost //this function uses local memory to store the message and data values at each disparity in the intermediate step of current message computation //this function uses linear memory bound to textures to access the current data and message values template<> ARCHITECTURE_ADDITION inline void runBPIterationUsingCheckerboardUpdatesDeviceNoTexBoundAndLocalMemPixel<float, float>( int xVal, int yVal, int checkerboardToUpdate, levelProperties& currentLevelProperties, float* dataCostStereoCheckerboard1, float* dataCostStereoCheckerboard2, float* messageUDeviceCurrentCheckerboard1, float* messageDDeviceCurrentCheckerboard1, float* messageLDeviceCurrentCheckerboard1, float* messageRDeviceCurrentCheckerboard1, float* messageUDeviceCurrentCheckerboard2, float* messageDDeviceCurrentCheckerboard2, float* messageLDeviceCurrentCheckerboard2, float* messageRDeviceCurrentCheckerboard2, float disc_k_bp, int offsetData, bool dataAligned) { int checkerboardAdjustment; //checkerboardAdjustment used for indexing into current checkerboard to update if (checkerboardToUpdate == CHECKERBOARD_PART_1) { checkerboardAdjustment = ((yVal)%2); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { checkerboardAdjustment = ((yVal+1)%2); } //may want to look into (xVal < (widthLevelCheckerboardPart - 1) since it may affect the edges //make sure that the current point is not an edge/corner that doesn't have four neighbors that can pass values to it //if ((xVal >= (1 - checkerboardAdjustment)) && (xVal < (widthLevelCheckerboardPart - 1)) && (yVal > 0) && (yVal < (heightLevel - 1))) if ((xVal >= (1 - checkerboardAdjustment)) && (xVal < (currentLevelProperties.widthCheckerboardLevel - checkerboardAdjustment)) && (yVal > 0) && (yVal < (currentLevelProperties.heightLevel - 1))) { #if (DISP_INDEX_START_REG_LOCAL_MEM < NUM_POSSIBLE_DISPARITY_VALUES) float prevUMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; float prevDMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; float 
prevLMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; float prevRMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; float dataMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; #else float* prevUMessage = nullptr; float* prevDMessage = nullptr; float* prevLMessage = nullptr; float* prevRMessage = nullptr; float* dataMessage = nullptr; #endif #if (DISP_INDEX_START_REG_LOCAL_MEM > 0) int numDataSharedMemoryArray = BLOCK_SIZE_WIDTH_BP * BLOCK_SIZE_HEIGHT_BP * DISP_INDEX_START_REG_LOCAL_MEM; extern __shared__ float dstSharedMem[]; float *prevUMessageShared = dstSharedMem; float *prevDMessageShared = &dstSharedMem[numDataSharedMemoryArray]; float *prevLMessageShared = &dstSharedMem[2*numDataSharedMemoryArray]; float *prevRMessageShared = &dstSharedMem[3*numDataSharedMemoryArray]; float *dataMessageShared = &dstSharedMem[4*numDataSharedMemoryArray]; #else float *prevUMessageShared = nullptr; float *prevDMessageShared = nullptr; float *prevLMessageShared = nullptr; float *prevRMessageShared = nullptr; float *dataMessageShared = nullptr; #endif int startIndexDstShared = threadIdx.y * BLOCK_SIZE_WIDTH_BP + threadIdx.x; int indexIndexDstShared = startIndexDstShared; for (int currentDisparity = 0; currentDisparity < DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity++) { if (checkerboardToUpdate == CHECKERBOARD_PART_1) { dataMessageShared[indexIndexDstShared] = (dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessageShared[indexIndexDstShared] = (messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessageShared[indexIndexDstShared] = (messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessageShared[indexIndexDstShared] = (messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessageShared[indexIndexDstShared] = (messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { dataMessageShared[indexIndexDstShared] = (dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessageShared[indexIndexDstShared] = (messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessageShared[indexIndexDstShared] = (messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, 
currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessageShared[indexIndexDstShared] = (messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessageShared[indexIndexDstShared] = (messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } indexIndexDstShared += BLOCK_SIZE_WIDTH_BP * BLOCK_SIZE_HEIGHT_BP;; } for (int currentDisparity = DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { if (checkerboardToUpdate == CHECKERBOARD_PART_1) { dataMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { dataMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), 
currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } //uses the previous message values and data cost to calculate the current message values and store the results if (checkerboardToUpdate == CHECKERBOARD_PART_1) { runBPIterationInOutDataInLocalMem<float, float>(xVal, yVal, currentLevelProperties, prevUMessageShared, prevDMessageShared, prevLMessageShared, prevRMessageShared, dataMessageShared, prevUMessage, prevDMessage, prevLMessage, prevRMessage, dataMessage, messageUDeviceCurrentCheckerboard1, messageDDeviceCurrentCheckerboard1, messageLDeviceCurrentCheckerboard1, messageRDeviceCurrentCheckerboard1, (float) disc_k_bp, dataAligned); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { runBPIterationInOutDataInLocalMem<float, float>(xVal, yVal, currentLevelProperties, prevUMessageShared, prevDMessageShared, prevLMessageShared, prevRMessageShared, dataMessageShared, prevUMessage, prevDMessage, prevLMessage, prevRMessage, dataMessage, messageUDeviceCurrentCheckerboard2, messageDDeviceCurrentCheckerboard2, messageLDeviceCurrentCheckerboard2, messageRDeviceCurrentCheckerboard2, (float) disc_k_bp, dataAligned); } } } #elif CURRENT_DATA_TYPE_PROCESSING_FROM_PYTHON == DATA_TYPE_PROCESSING_HALF //device portion of the kernal function to run the current iteration of belief propagation in parallel using the checkerboard update method where half the pixels in the //"checkerboard" scheme retrieve messages from each 4-connected neighbor and then update their message based on the retrieved messages and the data cost //this function uses local memory to store the message and data values at each disparity in the intermediate step of current message computation //this function uses linear memory bound to textures to access the current data and message values template<> ARCHITECTURE_ADDITION inline void runBPIterationUsingCheckerboardUpdatesDeviceNoTexBoundAndLocalMemPixel<half, half>( int xVal, int yVal, int checkerboardToUpdate, levelProperties& currentLevelProperties, half* dataCostStereoCheckerboard1, half* dataCostStereoCheckerboard2, half* messageUDeviceCurrentCheckerboard1, half* messageDDeviceCurrentCheckerboard1, half* messageLDeviceCurrentCheckerboard1, half* messageRDeviceCurrentCheckerboard1, half* messageUDeviceCurrentCheckerboard2, half* messageDDeviceCurrentCheckerboard2, half* messageLDeviceCurrentCheckerboard2, half* messageRDeviceCurrentCheckerboard2, float disc_k_bp, int offsetData, bool dataAligned) { int checkerboardAdjustment; //checkerboardAdjustment used for indexing into current checkerboard to update if (checkerboardToUpdate == CHECKERBOARD_PART_1) { checkerboardAdjustment = ((yVal)%2); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { checkerboardAdjustment = ((yVal+1)%2); } //may want to look into (xVal < (widthLevelCheckerboardPart - 1) since it may affect the edges //make sure that the current point is not an edge/corner that doesn't have four neighbors that can pass values to it //if ((xVal >= (1 - checkerboardAdjustment)) && (xVal < (widthLevelCheckerboardPart - 1)) && (yVal > 0) && (yVal < (heightLevel - 1))) if ((xVal >= (1 - checkerboardAdjustment)) && (xVal < 
(currentLevelProperties.widthCheckerboardLevel - checkerboardAdjustment)) && (yVal > 0) && (yVal < (currentLevelProperties.heightLevel - 1))) { #if (DISP_INDEX_START_REG_LOCAL_MEM < NUM_POSSIBLE_DISPARITY_VALUES) half prevUMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; half prevDMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; half prevLMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; half prevRMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; half dataMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; #else half* prevUMessage = nullptr; half* prevDMessage = nullptr; half* prevLMessage = nullptr; half* prevRMessage = nullptr; half* dataMessage = nullptr; #endif #if (DISP_INDEX_START_REG_LOCAL_MEM > 0) int numDataSharedMemoryArray = BLOCK_SIZE_WIDTH_BP * BLOCK_SIZE_HEIGHT_BP * (DISP_INDEX_START_REG_LOCAL_MEM + (DISP_INDEX_START_REG_LOCAL_MEM % 2)); extern __shared__ half dstSharedMem[]; half *prevUMessageShared = dstSharedMem; half *prevDMessageShared = &dstSharedMem[numDataSharedMemoryArray]; half *prevLMessageShared = &dstSharedMem[2*numDataSharedMemoryArray]; half *prevRMessageShared = &dstSharedMem[3*numDataSharedMemoryArray]; half *dataMessageShared = &dstSharedMem[4*numDataSharedMemoryArray]; #else half *prevUMessageShared = nullptr; half *prevDMessageShared = nullptr; half *prevLMessageShared = nullptr; half *prevRMessageShared = nullptr; half *dataMessageShared = nullptr; #endif int startIndexDstShared = 2*(threadIdx.y * BLOCK_SIZE_WIDTH_BP + threadIdx.x); int indexIndexDstShared = startIndexDstShared; int halfIndexSharedVals[2] = {1, (2*BLOCK_SIZE_WIDTH_BP * BLOCK_SIZE_HEIGHT_BP)-1}; int indexIntervalNextHalfIndexSharedVals = 0; for (int currentDisparity = 0; currentDisparity < DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity++) { if (checkerboardToUpdate == CHECKERBOARD_PART_1) { dataMessageShared[indexIndexDstShared] = (dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessageShared[indexIndexDstShared] = (messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessageShared[indexIndexDstShared] = (messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessageShared[indexIndexDstShared] = (messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessageShared[indexIndexDstShared] = (messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { dataMessageShared[indexIndexDstShared] = (dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, 
currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessageShared[indexIndexDstShared] = (messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessageShared[indexIndexDstShared] = (messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessageShared[indexIndexDstShared] = (messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessageShared[indexIndexDstShared] = (messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } indexIndexDstShared += halfIndexSharedVals[indexIntervalNextHalfIndexSharedVals]; indexIntervalNextHalfIndexSharedVals = !indexIntervalNextHalfIndexSharedVals; } for (int currentDisparity = DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { if (checkerboardToUpdate == CHECKERBOARD_PART_1) { dataMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { dataMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, 
(yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } //uses the previous message values and data cost to calculate the current message values and store the results if (checkerboardToUpdate == CHECKERBOARD_PART_1) { runBPIterationInOutDataInLocalMem<half, half>(xVal, yVal, currentLevelProperties, prevUMessageShared, prevDMessageShared, prevLMessageShared, prevRMessageShared, dataMessageShared, prevUMessage, prevDMessage, prevLMessage, prevRMessage, dataMessage, messageUDeviceCurrentCheckerboard1, messageDDeviceCurrentCheckerboard1, messageLDeviceCurrentCheckerboard1, messageRDeviceCurrentCheckerboard1, (half) disc_k_bp, dataAligned); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { runBPIterationInOutDataInLocalMem<half, half>(xVal, yVal, currentLevelProperties, prevUMessageShared, prevDMessageShared, prevLMessageShared, prevRMessageShared, dataMessageShared, prevUMessage, prevDMessage, prevLMessage, prevRMessage, dataMessage, messageUDeviceCurrentCheckerboard2, messageDDeviceCurrentCheckerboard2, messageLDeviceCurrentCheckerboard2, messageRDeviceCurrentCheckerboard2, (half) disc_k_bp, dataAligned); } } } #endif
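The comments in msgStereo above defer the per-disparity minimum to dtStereo, which is defined in the shared header included at the top of this file and not reproduced here. As a reference for what that call is described as computing, below is a minimal sketch (an assumption for illustration, not the project's actual implementation) of the linear-time forward/backward lower-envelope pass from Felzenszwalb and Huttenlocher's "Efficient Belief Propagation for Early Vision", written for the float case with a unit slope on the discontinuity cost; truncation by disc_k_bp and normalization are applied separately, as in the code above.

__device__ inline void dtStereoSketch(float f[NUM_POSSIBLE_DISPARITY_VALUES])
{
  // forward pass: each cost may be lowered by its left neighbor plus the unit jump cost
  for (int d = 1; d < NUM_POSSIBLE_DISPARITY_VALUES; d++)
  {
    float prev = f[d - 1] + 1.0f;
    if (prev < f[d])
    {
      f[d] = prev;
    }
  }
  // backward pass: the same relaxation from the right neighbor
  for (int d = NUM_POSSIBLE_DISPARITY_VALUES - 2; d >= 0; d--)
  {
    float prev = f[d + 1] + 1.0f;
    if (prev < f[d])
    {
      f[d] = prev;
    }
  }
}

After both passes, f[d] equals the minimum over d' of (original f[d'] + |d - d'|), which is the min-convolution the message update needs before truncation against the minimum plus disc_k_bp.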
308542782fe50a9577b9ae7375f9d88de486cba6.cu
#define PROCESSING_ON_GPU #include "../../SharedFuncts/SharedBPProcessingFuncts.h" #include "../../bpStereoCudaParameters.h" #undef PROCESSING_ON_GPU template<typename T, typename U> __device__ inline void msgStereo(int xVal, int yVal, levelProperties& currentLevelProperties, T* messageValsNeighbor1Shared, T* messageValsNeighbor2Shared, T* messageValsNeighbor3Shared, T* dataCostsShared, T* messageValsNeighbor1, T* messageValsNeighbor2, T* messageValsNeighbor3, T* dataCosts, T* dstMessageArray, U disc_k_bp, bool dataAligned) { printf("Data type not supported\n"); } template<> __device__ inline void msgStereo<half, half>(int xVal, int yVal, levelProperties& currentLevelProperties, half* messageValsNeighbor1Shared, half* messageValsNeighbor2Shared, half* messageValsNeighbor3Shared, half* dataCostsShared, half* messageValsNeighbor1, half* messageValsNeighbor2, half* messageValsNeighbor3, half* dataCosts, half* dstMessageArray, half disc_k_bp, bool dataAligned) { //printf("USED SHARED MEMORY\n"); // aggregate and find min half minimum = INF_BP; int startIndexDstShared = 2*(threadIdx.y * BLOCK_SIZE_WIDTH_BP + threadIdx.x); int indexIndexDstShared = startIndexDstShared; int halfIndexSharedVals[2] = {1, (2*BLOCK_SIZE_WIDTH_BP * BLOCK_SIZE_HEIGHT_BP)-1}; int indexIntervalNextHalfIndexSharedVals = 0; half dst[NUM_POSSIBLE_DISPARITY_VALUES]; //#pragma unroll 64 for (int currentDisparity = 0; currentDisparity < DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity++) { dst[currentDisparity] = messageValsNeighbor1Shared[indexIndexDstShared] + messageValsNeighbor2Shared[indexIndexDstShared] + messageValsNeighbor3Shared[indexIndexDstShared] + dataCostsShared[indexIndexDstShared]; if (dst[currentDisparity] < minimum) { minimum = dst[currentDisparity]; } indexIndexDstShared += halfIndexSharedVals[indexIntervalNextHalfIndexSharedVals]; indexIntervalNextHalfIndexSharedVals = !indexIntervalNextHalfIndexSharedVals; } //#pragma unroll 64 for (int currentDisparity = DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = messageValsNeighbor1[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] + messageValsNeighbor2[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] + messageValsNeighbor3[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] + dataCosts[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM]; if (dst[currentDisparity] < minimum) { minimum = dst[currentDisparity]; } } //retrieve the minimum value at each disparity in O(n) time using Felzenszwalb's method (see "Efficient Belief Propagation for Early Vision") //#if (NUM_POSSIBLE_DISPARITY_VALUES - 1) <= DISPARITY_START_SHARED_MEM //no shared memory used // dtStereo<float>(dst); //#else dtStereo<half>(dst); //#endif // truncate minimum += disc_k_bp; // normalize half valToNormalize = 0.0f; //#pragma unroll 64 for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { if (minimum < dst[currentDisparity]) { dst[currentDisparity] = minimum; } valToNormalize += dst[currentDisparity]; } valToNormalize /= ((half) NUM_POSSIBLE_DISPARITY_VALUES); int destMessageArrayIndex = retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, 0, NUM_POSSIBLE_DISPARITY_VALUES); //#pragma unroll 64 for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] -= valToNormalize; dstMessageArray[destMessageArrayIndex] = 
dst[currentDisparity]; #if OPTIMIZED_INDEXING_SETTING == 1 destMessageArrayIndex += currentLevelProperties.paddedWidthCheckerboardLevel; #else destMessageArrayIndex++; #endif //OPTIMIZED_INDEXING_SETTING == 1 } } template<> __device__ inline void msgStereo<float, float>(int xVal, int yVal, levelProperties& currentLevelProperties, float* messageValsNeighbor1Shared, float* messageValsNeighbor2Shared, float* messageValsNeighbor3Shared, float* dataCostsShared, float* messageValsNeighbor1, float* messageValsNeighbor2, float* messageValsNeighbor3, float* dataCosts, float* dstMessageArray, float disc_k_bp, bool dataAligned) { //printf("USED SHARED MEMORY\n"); // aggregate and find min float minimum = INF_BP; int startIndexDstShared = threadIdx.y * BLOCK_SIZE_WIDTH_BP + threadIdx.x; int indexIndexDstShared = startIndexDstShared; float dst[NUM_POSSIBLE_DISPARITY_VALUES]; //#pragma unroll 64 for (int currentDisparity = 0; currentDisparity < DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity++) { dst[currentDisparity] = messageValsNeighbor1Shared[indexIndexDstShared] + messageValsNeighbor2Shared[indexIndexDstShared] + messageValsNeighbor3Shared[indexIndexDstShared] + dataCostsShared[indexIndexDstShared]; if (dst[currentDisparity] < minimum) { minimum = dst[currentDisparity]; } indexIndexDstShared += BLOCK_SIZE_WIDTH_BP * BLOCK_SIZE_HEIGHT_BP; } //#pragma unroll 64 for (int currentDisparity = DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = messageValsNeighbor1[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] + messageValsNeighbor2[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] + messageValsNeighbor3[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] + dataCosts[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM]; if (dst[currentDisparity] < minimum) { minimum = dst[currentDisparity]; } } //retrieve the minimum value at each disparity in O(n) time using Felzenszwalb's method (see "Efficient Belief Propagation for Early Vision") //#if (NUM_POSSIBLE_DISPARITY_VALUES - 1) <= DISPARITY_START_SHARED_MEM //no shared memory used // dtStereo<float>(dst); //#else dtStereo<float>(dst); //#endif // truncate minimum += disc_k_bp; // normalize float valToNormalize = 0.0f; //#pragma unroll 64 for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { if (minimum < dst[currentDisparity]) { dst[currentDisparity] = minimum; } valToNormalize += dst[currentDisparity]; } valToNormalize /= ((float) NUM_POSSIBLE_DISPARITY_VALUES); int destMessageArrayIndex = retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, 0, NUM_POSSIBLE_DISPARITY_VALUES); //#pragma unroll 64 for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] -= valToNormalize; dstMessageArray[destMessageArrayIndex] = dst[currentDisparity]; #if OPTIMIZED_INDEXING_SETTING == 1 destMessageArrayIndex += currentLevelProperties.paddedWidthCheckerboardLevel; #else destMessageArrayIndex++; #endif //OPTIMIZED_INDEXING_SETTING == 1 } } template<typename T, typename U> ARCHITECTURE_ADDITION inline void runBPIterationInOutDataInLocalMem(int xVal, int yVal, levelProperties& currentLevelProperties, T* prevUMessageShared, T* prevDMessageShared, T* prevLMessageShared, T* prevRMessageShared, T* dataMessageShared, T* prevUMessage, T* prevDMessage, T* prevLMessage, T* prevRMessage, T* dataMessage, T* 
currentUMessageArray, T* currentDMessageArray, T* currentLMessageArray, T* currentRMessageArray, U disc_k_bp, bool dataAligned) { msgStereo<T, U>(xVal, yVal, currentLevelProperties, prevUMessageShared, prevLMessageShared, prevRMessageShared, dataMessageShared, prevUMessage, prevLMessage, prevRMessage, dataMessage, currentUMessageArray, disc_k_bp, dataAligned); msgStereo<T, U>(xVal, yVal, currentLevelProperties, prevDMessageShared, prevLMessageShared, prevRMessageShared, dataMessageShared, prevDMessage, prevLMessage, prevRMessage, dataMessage, currentDMessageArray, disc_k_bp, dataAligned); msgStereo<T, U>(xVal, yVal, currentLevelProperties, prevUMessageShared, prevDMessageShared, prevRMessageShared, dataMessageShared, prevUMessage, prevDMessage, prevRMessage, dataMessage, currentRMessageArray, disc_k_bp, dataAligned); msgStereo<T, U>(xVal, yVal, currentLevelProperties, prevUMessageShared, prevDMessageShared, prevLMessageShared, dataMessageShared, prevUMessage, prevDMessage, prevLMessage, dataMessage, currentLMessageArray, disc_k_bp, dataAligned); } #if CURRENT_DATA_TYPE_PROCESSING_FROM_PYTHON == DATA_TYPE_PROCESSING_FLOAT //device portion of the kernal function to run the current iteration of belief propagation in parallel using the checkerboard update method where half the pixels in the //"checkerboard" scheme retrieve messages from each 4-connected neighbor and then update their message based on the retrieved messages and the data cost //this function uses local memory to store the message and data values at each disparity in the intermediate step of current message computation //this function uses linear memory bound to textures to access the current data and message values template<> ARCHITECTURE_ADDITION inline void runBPIterationUsingCheckerboardUpdatesDeviceNoTexBoundAndLocalMemPixel<float, float>( int xVal, int yVal, int checkerboardToUpdate, levelProperties& currentLevelProperties, float* dataCostStereoCheckerboard1, float* dataCostStereoCheckerboard2, float* messageUDeviceCurrentCheckerboard1, float* messageDDeviceCurrentCheckerboard1, float* messageLDeviceCurrentCheckerboard1, float* messageRDeviceCurrentCheckerboard1, float* messageUDeviceCurrentCheckerboard2, float* messageDDeviceCurrentCheckerboard2, float* messageLDeviceCurrentCheckerboard2, float* messageRDeviceCurrentCheckerboard2, float disc_k_bp, int offsetData, bool dataAligned) { int checkerboardAdjustment; //checkerboardAdjustment used for indexing into current checkerboard to update if (checkerboardToUpdate == CHECKERBOARD_PART_1) { checkerboardAdjustment = ((yVal)%2); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { checkerboardAdjustment = ((yVal+1)%2); } //may want to look into (xVal < (widthLevelCheckerboardPart - 1) since it may affect the edges //make sure that the current point is not an edge/corner that doesn't have four neighbors that can pass values to it //if ((xVal >= (1 - checkerboardAdjustment)) && (xVal < (widthLevelCheckerboardPart - 1)) && (yVal > 0) && (yVal < (heightLevel - 1))) if ((xVal >= (1 - checkerboardAdjustment)) && (xVal < (currentLevelProperties.widthCheckerboardLevel - checkerboardAdjustment)) && (yVal > 0) && (yVal < (currentLevelProperties.heightLevel - 1))) { #if (DISP_INDEX_START_REG_LOCAL_MEM < NUM_POSSIBLE_DISPARITY_VALUES) float prevUMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; float prevDMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; float prevLMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; float 
prevRMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; float dataMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; #else float* prevUMessage = nullptr; float* prevDMessage = nullptr; float* prevLMessage = nullptr; float* prevRMessage = nullptr; float* dataMessage = nullptr; #endif #if (DISP_INDEX_START_REG_LOCAL_MEM > 0) int numDataSharedMemoryArray = BLOCK_SIZE_WIDTH_BP * BLOCK_SIZE_HEIGHT_BP * DISP_INDEX_START_REG_LOCAL_MEM; extern __shared__ float dstSharedMem[]; float *prevUMessageShared = dstSharedMem; float *prevDMessageShared = &dstSharedMem[numDataSharedMemoryArray]; float *prevLMessageShared = &dstSharedMem[2*numDataSharedMemoryArray]; float *prevRMessageShared = &dstSharedMem[3*numDataSharedMemoryArray]; float *dataMessageShared = &dstSharedMem[4*numDataSharedMemoryArray]; #else float *prevUMessageShared = nullptr; float *prevDMessageShared = nullptr; float *prevLMessageShared = nullptr; float *prevRMessageShared = nullptr; float *dataMessageShared = nullptr; #endif int startIndexDstShared = threadIdx.y * BLOCK_SIZE_WIDTH_BP + threadIdx.x; int indexIndexDstShared = startIndexDstShared; for (int currentDisparity = 0; currentDisparity < DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity++) { if (checkerboardToUpdate == CHECKERBOARD_PART_1) { dataMessageShared[indexIndexDstShared] = (dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessageShared[indexIndexDstShared] = (messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessageShared[indexIndexDstShared] = (messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessageShared[indexIndexDstShared] = (messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessageShared[indexIndexDstShared] = (messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { dataMessageShared[indexIndexDstShared] = (dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessageShared[indexIndexDstShared] = (messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessageShared[indexIndexDstShared] = (messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); 
prevLMessageShared[indexIndexDstShared] = (messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessageShared[indexIndexDstShared] = (messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } indexIndexDstShared += BLOCK_SIZE_WIDTH_BP * BLOCK_SIZE_HEIGHT_BP;; } for (int currentDisparity = DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { if (checkerboardToUpdate == CHECKERBOARD_PART_1) { dataMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { dataMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, 
NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } //uses the previous message values and data cost to calculate the current message values and store the results if (checkerboardToUpdate == CHECKERBOARD_PART_1) { runBPIterationInOutDataInLocalMem<float, float>(xVal, yVal, currentLevelProperties, prevUMessageShared, prevDMessageShared, prevLMessageShared, prevRMessageShared, dataMessageShared, prevUMessage, prevDMessage, prevLMessage, prevRMessage, dataMessage, messageUDeviceCurrentCheckerboard1, messageDDeviceCurrentCheckerboard1, messageLDeviceCurrentCheckerboard1, messageRDeviceCurrentCheckerboard1, (float) disc_k_bp, dataAligned); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { runBPIterationInOutDataInLocalMem<float, float>(xVal, yVal, currentLevelProperties, prevUMessageShared, prevDMessageShared, prevLMessageShared, prevRMessageShared, dataMessageShared, prevUMessage, prevDMessage, prevLMessage, prevRMessage, dataMessage, messageUDeviceCurrentCheckerboard2, messageDDeviceCurrentCheckerboard2, messageLDeviceCurrentCheckerboard2, messageRDeviceCurrentCheckerboard2, (float) disc_k_bp, dataAligned); } } } #elif CURRENT_DATA_TYPE_PROCESSING_FROM_PYTHON == DATA_TYPE_PROCESSING_HALF //device portion of the kernal function to run the current iteration of belief propagation in parallel using the checkerboard update method where half the pixels in the //"checkerboard" scheme retrieve messages from each 4-connected neighbor and then update their message based on the retrieved messages and the data cost //this function uses local memory to store the message and data values at each disparity in the intermediate step of current message computation //this function uses linear memory bound to textures to access the current data and message values template<> ARCHITECTURE_ADDITION inline void runBPIterationUsingCheckerboardUpdatesDeviceNoTexBoundAndLocalMemPixel<half, half>( int xVal, int yVal, int checkerboardToUpdate, levelProperties& currentLevelProperties, half* dataCostStereoCheckerboard1, half* dataCostStereoCheckerboard2, half* messageUDeviceCurrentCheckerboard1, half* messageDDeviceCurrentCheckerboard1, half* messageLDeviceCurrentCheckerboard1, half* messageRDeviceCurrentCheckerboard1, half* messageUDeviceCurrentCheckerboard2, half* messageDDeviceCurrentCheckerboard2, half* messageLDeviceCurrentCheckerboard2, half* messageRDeviceCurrentCheckerboard2, float disc_k_bp, int offsetData, bool dataAligned) { int checkerboardAdjustment; //checkerboardAdjustment used for indexing into current checkerboard to update if (checkerboardToUpdate == CHECKERBOARD_PART_1) { checkerboardAdjustment = ((yVal)%2); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { checkerboardAdjustment = ((yVal+1)%2); } //may want to look into (xVal < (widthLevelCheckerboardPart - 1) since it may affect the edges //make sure that the current point is not an edge/corner that doesn't have four neighbors that can pass values to it //if ((xVal >= (1 - checkerboardAdjustment)) && (xVal < (widthLevelCheckerboardPart - 1)) && (yVal > 0) && (yVal < (heightLevel - 1))) if ((xVal >= (1 - checkerboardAdjustment)) && (xVal < (currentLevelProperties.widthCheckerboardLevel - checkerboardAdjustment)) && (yVal > 0) && (yVal < 
(currentLevelProperties.heightLevel - 1))) { #if (DISP_INDEX_START_REG_LOCAL_MEM < NUM_POSSIBLE_DISPARITY_VALUES) half prevUMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; half prevDMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; half prevLMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; half prevRMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; half dataMessage[NUM_POSSIBLE_DISPARITY_VALUES - DISP_INDEX_START_REG_LOCAL_MEM]; #else half* prevUMessage = nullptr; half* prevDMessage = nullptr; half* prevLMessage = nullptr; half* prevRMessage = nullptr; half* dataMessage = nullptr; #endif #if (DISP_INDEX_START_REG_LOCAL_MEM > 0) int numDataSharedMemoryArray = BLOCK_SIZE_WIDTH_BP * BLOCK_SIZE_HEIGHT_BP * (DISP_INDEX_START_REG_LOCAL_MEM + (DISP_INDEX_START_REG_LOCAL_MEM % 2)); extern __shared__ half dstSharedMem[]; half *prevUMessageShared = dstSharedMem; half *prevDMessageShared = &dstSharedMem[numDataSharedMemoryArray]; half *prevLMessageShared = &dstSharedMem[2*numDataSharedMemoryArray]; half *prevRMessageShared = &dstSharedMem[3*numDataSharedMemoryArray]; half *dataMessageShared = &dstSharedMem[4*numDataSharedMemoryArray]; #else half *prevUMessageShared = nullptr; half *prevDMessageShared = nullptr; half *prevLMessageShared = nullptr; half *prevRMessageShared = nullptr; half *dataMessageShared = nullptr; #endif int startIndexDstShared = 2*(threadIdx.y * BLOCK_SIZE_WIDTH_BP + threadIdx.x); int indexIndexDstShared = startIndexDstShared; int halfIndexSharedVals[2] = {1, (2*BLOCK_SIZE_WIDTH_BP * BLOCK_SIZE_HEIGHT_BP)-1}; int indexIntervalNextHalfIndexSharedVals = 0; for (int currentDisparity = 0; currentDisparity < DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity++) { if (checkerboardToUpdate == CHECKERBOARD_PART_1) { dataMessageShared[indexIndexDstShared] = (dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessageShared[indexIndexDstShared] = (messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessageShared[indexIndexDstShared] = (messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessageShared[indexIndexDstShared] = (messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessageShared[indexIndexDstShared] = (messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { dataMessageShared[indexIndexDstShared] = (dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); 
prevUMessageShared[indexIndexDstShared] = (messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessageShared[indexIndexDstShared] = (messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessageShared[indexIndexDstShared] = (messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessageShared[indexIndexDstShared] = (messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } indexIndexDstShared += halfIndexSharedVals[indexIntervalNextHalfIndexSharedVals]; indexIntervalNextHalfIndexSharedVals = !indexIntervalNextHalfIndexSharedVals; } for (int currentDisparity = DISP_INDEX_START_REG_LOCAL_MEM; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { if (checkerboardToUpdate == CHECKERBOARD_PART_1) { dataMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { dataMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]); prevUMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal+1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, 
currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevDMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(xVal, (yVal-1), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevLMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage((xVal + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); prevRMessage[currentDisparity - DISP_INDEX_START_REG_LOCAL_MEM] = (messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage(((xVal - 1) + checkerboardAdjustment), (yVal), currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } //uses the previous message values and data cost to calculate the current message values and store the results if (checkerboardToUpdate == CHECKERBOARD_PART_1) { runBPIterationInOutDataInLocalMem<half, half>(xVal, yVal, currentLevelProperties, prevUMessageShared, prevDMessageShared, prevLMessageShared, prevRMessageShared, dataMessageShared, prevUMessage, prevDMessage, prevLMessage, prevRMessage, dataMessage, messageUDeviceCurrentCheckerboard1, messageDDeviceCurrentCheckerboard1, messageLDeviceCurrentCheckerboard1, messageRDeviceCurrentCheckerboard1, (half) disc_k_bp, dataAligned); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { runBPIterationInOutDataInLocalMem<half, half>(xVal, yVal, currentLevelProperties, prevUMessageShared, prevDMessageShared, prevLMessageShared, prevRMessageShared, dataMessageShared, prevUMessage, prevDMessage, prevLMessage, prevRMessage, dataMessage, messageUDeviceCurrentCheckerboard2, messageDDeviceCurrentCheckerboard2, messageLDeviceCurrentCheckerboard2, messageRDeviceCurrentCheckerboard2, (half) disc_k_bp, dataAligned); } } } #endif
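Both specialisations above split per-disparity storage between dynamically sized shared memory (disparities below DISP_INDEX_START_REG_LOCAL_MEM) and per-thread register arrays (the remaining disparities). A minimal, self-contained sketch of that storage split, using hypothetical block and disparity sizes rather than the project's real constants, is:

#include <cuda_runtime.h>

// Sketch only: values below START_REG live in dynamic shared memory (one slot per
// thread per value), the rest in per-thread registers, as in the kernels above.
#define BLOCK_W 32
#define BLOCK_H 4
#define NUM_VALS 16
#define START_REG 8   // [0, START_REG) -> shared memory, [START_REG, NUM_VALS) -> registers

__global__ void splitStorageSketch(const float* in, float* out)
{
    extern __shared__ float sharedVals[];      // BLOCK_W * BLOCK_H * START_REG floats
    float regVals[NUM_VALS - START_REG];

    int tid   = threadIdx.y * BLOCK_W + threadIdx.x;
    int pixel = blockIdx.x * (BLOCK_W * BLOCK_H) + tid;

    // Strided shared-memory layout: consecutive threads touch consecutive words.
    for (int v = 0; v < START_REG; ++v)
        sharedVals[v * BLOCK_W * BLOCK_H + tid] = in[pixel * NUM_VALS + v];
    for (int v = START_REG; v < NUM_VALS; ++v)
        regVals[v - START_REG] = in[pixel * NUM_VALS + v];

    float sum = 0.0f;
    for (int v = 0; v < START_REG; ++v)
        sum += sharedVals[v * BLOCK_W * BLOCK_H + tid];
    for (int v = START_REG; v < NUM_VALS; ++v)
        sum += regVals[v - START_REG];
    out[pixel] = sum;
}

void launchSplitStorageSketch(const float* d_in, float* d_out, int numBlocks)
{
    dim3 block(BLOCK_W, BLOCK_H);
    size_t sharedBytes = BLOCK_W * BLOCK_H * START_REG * sizeof(float);
    splitStorageSketch<<<numBlocks, block, sharedBytes>>>(d_in, d_out);
}

The strided slot index (v * blockSize + tid) mirrors the float specialisation's increment of indexIndexDstShared by BLOCK_SIZE_WIDTH_BP * BLOCK_SIZE_HEIGHT_BP per disparity, which keeps neighbouring threads on consecutive shared-memory words.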
27c7b09dabd787889ca9c41c03c97654754126df.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <searchers/IntBacktrackSearcher.h> #include <utils/Utils.h> #include <wrappers/Wrappers.h> void IntBacktrackSearcher::initialize(FlatZinc::FlatZincModel* fzModel, Statistics* stats) { variables = fzModel->intVariables; constraints = fzModel->intConstraints; chosenVariables.initialize(variables->count); chosenValues.initialize(variables->count); stack.initialize(&variables->domains.representations, stats); variablesChooser.initialzie(IntVariablesChooser::InOrder, variables, &chosenVariables); valuesChooser.initialzie(IntValuesChooser::InOrder, variables); propagator.initialize(variables, constraints, stats); backtrackingLevel = 0; backtrackingState = VariableNotChosen; #ifdef GPU varibalesBlockCount = KernelUtils::getBlockCount(variables->count, DEFAULT_BLOCK_SIZE); #endif switch (fzModel->method()) { case FlatZinc::FlatZincModel::Meth::SAT: { searchType = Satisfiability; } break; case FlatZinc::FlatZincModel::Meth::MAX: { searchType = Maximization; } break; case FlatZinc::FlatZincModel::Meth::MIN: { searchType = Minimization; } break; } if (searchType == Maximization or searchType == Minimization) { optVariable = fzModel->optVar(); optConstraint = fzModel->optConst(); } else { optVariable = -1; optConstraint = -1; } this->stats = stats; stats->varibalesCount = variables->count; stats->constraintsCount = constraints->count; } void IntBacktrackSearcher::deinitialize() { chosenVariables.deinitialize(); chosenValues.deinitialize(); stack.deinitialize(); propagator.deinitialize(); } cudaDevice bool IntBacktrackSearcher::getNextSolution() { bool solutionFound = false; while (backtrackingLevel >= 0 and (not solutionFound)) { switch (backtrackingState) { case VariableNotChosen: { #ifdef GPU hipLaunchKernelGGL(( Wrappers::saveState), dim3(varibalesBlockCount), dim3(DEFAULT_BLOCK_SIZE), 0, 0, &stack, backtrackingLevel); hipDeviceSynchronize(); #else stack.saveState(backtrackingLevel); #endif if (variablesChooser.getVariable(backtrackingLevel, &chosenVariable)) { chosenVariables.push_back(chosenVariable); backtrackingState = VariableChosen; } else { LogUtils::error(__PRETTY_FUNCTION__, "Failed to set variable"); } } break; case VariableChosen: { if (not variables->domains.isSingleton(chosenVariables.back())) { if (valuesChooser.getFirstValue(chosenVariables.back(), &chosenValue)) { chosenValues.push_back(chosenValue); variables->domains.fixValue(chosenVariables.back(), chosenValues.back()); backtrackingState = ValueChosen; } else { LogUtils::error(__PRETTY_FUNCTION__, "Failed to set first value"); } } else { chosenValues.push_back(variables->domains.getMin(chosenVariables.back())); backtrackingState = SuccessfulPropagation; } stats->nodesCount += 1; } break; case ValueChosen: { bool noEmptyDomains = propagator.propagateConstraints(); if (noEmptyDomains) { backtrackingState = SuccessfulPropagation; } else { backtrackingState = ValueChecked; stats->failuresCount += 1; } } break; case SuccessfulPropagation: { if (backtrackingLevel < variables->count - 1) { backtrackingLevel += 1; backtrackingState = VariableNotChosen; } else { backtrackingState = ValueChecked; if (propagator.verifyConstraints()) { solutionFound = true; } else { stats->failuresCount += 1; } } } break; case ValueChecked: { #ifdef GPU hipLaunchKernelGGL(( Wrappers::restoreState), dim3(varibalesBlockCount), dim3(DEFAULT_BLOCK_SIZE), 0, 0, &stack, backtrackingLevel); hipDeviceSynchronize(); #else stack.restoreState(backtrackingLevel); #endif if 
(valuesChooser.getNextValue(chosenVariables.back(), chosenValues.back(), &chosenValue)) { chosenValues.back() = chosenValue; variables->domains.fixValue(chosenVariables.back(), chosenValues.back()); backtrackingState = ValueChosen; } else { #ifdef GPU hipLaunchKernelGGL(( Wrappers::clearState), dim3(varibalesBlockCount), dim3(DEFAULT_BLOCK_SIZE), 0, 0, &stack, backtrackingLevel); hipDeviceSynchronize(); #else stack.clearState(backtrackingLevel); #endif backtrackingLevel -= 1; chosenVariables.pop_back(); chosenValues.pop_back(); } } break; } } if (solutionFound and (searchType == Maximization or searchType == Minimization)) { shrinkOptimizationBound(); } return solutionFound; } cudaDevice void IntBacktrackSearcher::shrinkOptimizationBound() { if (searchType == Maximization) { constraints->parameters[optConstraint][0] = variables->domains.getMin(optVariable) + 1; } else if (searchType == Minimization) { constraints->parameters[optConstraint][0] = variables->domains.getMin(optVariable) - 1; } }
27c7b09dabd787889ca9c41c03c97654754126df.cu
#include <searchers/IntBacktrackSearcher.h> #include <utils/Utils.h> #include <wrappers/Wrappers.h> void IntBacktrackSearcher::initialize(FlatZinc::FlatZincModel* fzModel, Statistics* stats) { variables = fzModel->intVariables; constraints = fzModel->intConstraints; chosenVariables.initialize(variables->count); chosenValues.initialize(variables->count); stack.initialize(&variables->domains.representations, stats); variablesChooser.initialzie(IntVariablesChooser::InOrder, variables, &chosenVariables); valuesChooser.initialzie(IntValuesChooser::InOrder, variables); propagator.initialize(variables, constraints, stats); backtrackingLevel = 0; backtrackingState = VariableNotChosen; #ifdef GPU varibalesBlockCount = KernelUtils::getBlockCount(variables->count, DEFAULT_BLOCK_SIZE); #endif switch (fzModel->method()) { case FlatZinc::FlatZincModel::Meth::SAT: { searchType = Satisfiability; } break; case FlatZinc::FlatZincModel::Meth::MAX: { searchType = Maximization; } break; case FlatZinc::FlatZincModel::Meth::MIN: { searchType = Minimization; } break; } if (searchType == Maximization or searchType == Minimization) { optVariable = fzModel->optVar(); optConstraint = fzModel->optConst(); } else { optVariable = -1; optConstraint = -1; } this->stats = stats; stats->varibalesCount = variables->count; stats->constraintsCount = constraints->count; } void IntBacktrackSearcher::deinitialize() { chosenVariables.deinitialize(); chosenValues.deinitialize(); stack.deinitialize(); propagator.deinitialize(); } cudaDevice bool IntBacktrackSearcher::getNextSolution() { bool solutionFound = false; while (backtrackingLevel >= 0 and (not solutionFound)) { switch (backtrackingState) { case VariableNotChosen: { #ifdef GPU Wrappers::saveState<<<varibalesBlockCount, DEFAULT_BLOCK_SIZE>>>(&stack, backtrackingLevel); cudaDeviceSynchronize(); #else stack.saveState(backtrackingLevel); #endif if (variablesChooser.getVariable(backtrackingLevel, &chosenVariable)) { chosenVariables.push_back(chosenVariable); backtrackingState = VariableChosen; } else { LogUtils::error(__PRETTY_FUNCTION__, "Failed to set variable"); } } break; case VariableChosen: { if (not variables->domains.isSingleton(chosenVariables.back())) { if (valuesChooser.getFirstValue(chosenVariables.back(), &chosenValue)) { chosenValues.push_back(chosenValue); variables->domains.fixValue(chosenVariables.back(), chosenValues.back()); backtrackingState = ValueChosen; } else { LogUtils::error(__PRETTY_FUNCTION__, "Failed to set first value"); } } else { chosenValues.push_back(variables->domains.getMin(chosenVariables.back())); backtrackingState = SuccessfulPropagation; } stats->nodesCount += 1; } break; case ValueChosen: { bool noEmptyDomains = propagator.propagateConstraints(); if (noEmptyDomains) { backtrackingState = SuccessfulPropagation; } else { backtrackingState = ValueChecked; stats->failuresCount += 1; } } break; case SuccessfulPropagation: { if (backtrackingLevel < variables->count - 1) { backtrackingLevel += 1; backtrackingState = VariableNotChosen; } else { backtrackingState = ValueChecked; if (propagator.verifyConstraints()) { solutionFound = true; } else { stats->failuresCount += 1; } } } break; case ValueChecked: { #ifdef GPU Wrappers::restoreState<<<varibalesBlockCount, DEFAULT_BLOCK_SIZE>>>(&stack, backtrackingLevel); cudaDeviceSynchronize(); #else stack.restoreState(backtrackingLevel); #endif if (valuesChooser.getNextValue(chosenVariables.back(), chosenValues.back(), &chosenValue)) { chosenValues.back() = chosenValue; 
variables->domains.fixValue(chosenVariables.back(), chosenValues.back()); backtrackingState = ValueChosen; } else { #ifdef GPU Wrappers::clearState<<<varibalesBlockCount, DEFAULT_BLOCK_SIZE>>>(&stack, backtrackingLevel); cudaDeviceSynchronize(); #else stack.clearState(backtrackingLevel); #endif backtrackingLevel -= 1; chosenVariables.pop_back(); chosenValues.pop_back(); } } break; } } if (solutionFound and (searchType == Maximization or searchType == Minimization)) { shrinkOptimizationBound(); } return solutionFound; } cudaDevice void IntBacktrackSearcher::shrinkOptimizationBound() { if (searchType == Maximization) { constraints->parameters[optConstraint][0] = variables->domains.getMin(optVariable) + 1; } else if (searchType == Minimization) { constraints->parameters[optConstraint][0] = variables->domains.getMin(optVariable) - 1; } }
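The .hip and .cu files in this pair differ mainly in what hipify rewrites: triple-chevron launches become hipLaunchKernelGGL calls, cudaDeviceSynchronize becomes hipDeviceSynchronize, and the HIP runtime header is added. A reduced illustration of that mapping on a toy kernel (kernel name and block size are hypothetical, not part of the searcher) is:

#include <hip/hip_runtime.h>

__global__ void toyKernel(int* data, int level) { data[threadIdx.x] = level; }

void launchToy(int* d_data, int level, int blockCount)
{
    // CUDA source:   toyKernel<<<blockCount, DEFAULT_BLOCK_SIZE>>>(d_data, level);
    //                cudaDeviceSynchronize();
    // hipify output:
    hipLaunchKernelGGL(toyKernel, dim3(blockCount), dim3(256 /* e.g. DEFAULT_BLOCK_SIZE */),
                       0, 0, d_data, level);
    hipDeviceSynchronize();
}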
e17d9a2704a477ed2c645369b6a12250ba1b667c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Copyright (c) 2011, Willow Garage, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Author: Anatoly Baskeheev, Itseez Ltd, ([email protected]) */ #include "pcl/gpu/utils/timers_cuda.hpp" #include "pcl/gpu/utils/safe_call.hpp" #include "internal.hpp" #include "utils/boxutils.hpp" #include<limits> using namespace pcl::gpu; using namespace pcl::device; using namespace std; namespace pcl { namespace device { __global__ void get_cc_kernel(int *data) { data[threadIdx.x + blockDim.x * blockIdx.x] = threadIdx.x; } } } void pcl::device::OctreeImpl::get_gpu_arch_compiled_for(int& bin, int& ptx) { hipFuncAttributes attrs; cudaSafeCall( hipFuncGetAttributes(&attrs, get_cc_kernel) ); bin = attrs.binaryVersion; ptx = attrs.ptxVersion; } void pcl::device::OctreeImpl::setCloud(const PointCloud& input_points) { points = input_points; } void pcl::device::OctreeImpl::internalDownload() { int number; DeviceArray<int>(octreeGlobal.nodes_num, 1).download(&number); DeviceArray<int>(octreeGlobal.begs, number).download(host_octree.begs); DeviceArray<int>(octreeGlobal.ends, number).download(host_octree.ends); DeviceArray<int>(octreeGlobal.nodes, number).download(host_octree.nodes); DeviceArray<int>(octreeGlobal.codes, number).download(host_octree.codes); points_sorted.download(host_octree.points_sorted, host_octree.points_sorted_step); indices.download(host_octree.indices); host_octree.downloaded = true; } namespace { int getBitsNum(int interger) { int count = 0; while(interger > 0) { if (interger & 1) ++count; interger>>=1; } return count; } struct OctreeIteratorHost { const static int MAX_LEVELS_PLUS_ROOT = 11; int paths[MAX_LEVELS_PLUS_ROOT]; int level; OctreeIteratorHost() { level = 0; // root level paths[level] = (0 << 8) + 1; } void gotoNextLevel(int first, int len) { ++level; paths[level] = (first << 8) + len; } int operator*() const { return paths[level] >> 8; } void operator++() { while(level >= 0) { int data = paths[level]; if ((data & 0xFF) 
> 1) // there are another siblings, can goto there { data += (1 << 8) - 1; // +1 to first and -1 from len paths[level] = data; break; } else --level; //goto parent; } } }; } void pcl::device::OctreeImpl::radiusSearchHost(const PointType& query, float radius, vector<int>& out, int max_nn) const { out.clear(); float3 center = make_float3(query.x, query.y, query.z); OctreeIteratorHost iterator; while(iterator.level >= 0) { int node_idx = *iterator; int code = host_octree.codes[node_idx]; float3 node_minp = octreeGlobal.minp; float3 node_maxp = octreeGlobal.maxp; calcBoundingBox(iterator.level, code, node_minp, node_maxp); //if true, take nothing, and go to next if (checkIfNodeOutsideSphere(node_minp, node_maxp, center, radius)) { ++iterator; continue; } //if true, take all, and go to next if (checkIfNodeInsideSphere(node_minp, node_maxp, center, radius)) { int beg = host_octree.begs[node_idx]; int end = host_octree.ends[node_idx]; end = beg + min<int>((int)out.size() + end - beg, max_nn) - (int)out.size(); out.insert(out.end(), host_octree.indices.begin() + beg, host_octree.indices.begin() + end); if (out.size() == (size_t)max_nn) return; ++iterator; continue; } // test children int children_mask = host_octree.nodes[node_idx] & 0xFF; bool isLeaf = children_mask == 0; if (isLeaf) { const int beg = host_octree.begs[node_idx]; const int end = host_octree.ends[node_idx]; for(int j = beg; j < end; ++j) { int index = host_octree.indices[j]; float point_x = host_octree.points_sorted[j ]; float point_y = host_octree.points_sorted[j + host_octree.points_sorted_step ]; float point_z = host_octree.points_sorted[j + host_octree.points_sorted_step * 2]; float dx = (point_x - center.x); float dy = (point_y - center.y); float dz = (point_z - center.z); float dist2 = dx * dx + dy * dy + dz * dz; if (dist2 < radius * radius) out.push_back(index); if (out.size() == (size_t)max_nn) return; } ++iterator; continue; } int first = host_octree.nodes[node_idx] >> 8; iterator.gotoNextLevel(first, getBitsNum(children_mask)); } } void pcl::device::OctreeImpl::approxNearestSearchHost(const PointType& query, int& out_index, float& sqr_dist) const { float3 minp = octreeGlobal.minp; float3 maxp = octreeGlobal.maxp; int node_idx = 0; bool out_of_root = query.x < minp.x || query.y < minp.y || query.z < minp.z || query.x > maxp.x || query.y > maxp.y || query.z > maxp.z; if(!out_of_root) { int code = CalcMorton(minp, maxp)(query); int level = 0; for(;;) { int mask_pos = 1 << Morton::extractLevelCode(code, level); int node = host_octree.nodes[node_idx]; int mask = node & 0xFF; if(getBitsNum(mask) == 0) // leaf break; if ( (mask & mask_pos) == 0) // no child break; node_idx = (node >> 8) + getBitsNum(mask & (mask_pos - 1)); ++level; } } int beg = host_octree.begs[node_idx]; int end = host_octree.ends[node_idx]; sqr_dist = std::numeric_limits<float>::max(); for(int i = beg; i < end; ++i) { float point_x = host_octree.points_sorted[i ]; float point_y = host_octree.points_sorted[i + host_octree.points_sorted_step ]; float point_z = host_octree.points_sorted[i + host_octree.points_sorted_step * 2]; float dx = (point_x - query.x); float dy = (point_y - query.y); float dz = (point_z - query.z); float d2 = dx * dx + dy * dy + dz * dz; if (sqr_dist > d2) { sqr_dist = d2; out_index = i; } } out_index = host_octree.indices[out_index]; }
e17d9a2704a477ed2c645369b6a12250ba1b667c.cu
/* * Software License Agreement (BSD License) * * Copyright (c) 2011, Willow Garage, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Author: Anatoly Baskeheev, Itseez Ltd, ([email protected]) */ #include "pcl/gpu/utils/timers_cuda.hpp" #include "pcl/gpu/utils/safe_call.hpp" #include "internal.hpp" #include "utils/boxutils.hpp" #include<limits> using namespace pcl::gpu; using namespace pcl::device; using namespace std; namespace pcl { namespace device { __global__ void get_cc_kernel(int *data) { data[threadIdx.x + blockDim.x * blockIdx.x] = threadIdx.x; } } } void pcl::device::OctreeImpl::get_gpu_arch_compiled_for(int& bin, int& ptx) { cudaFuncAttributes attrs; cudaSafeCall( cudaFuncGetAttributes(&attrs, get_cc_kernel) ); bin = attrs.binaryVersion; ptx = attrs.ptxVersion; } void pcl::device::OctreeImpl::setCloud(const PointCloud& input_points) { points = input_points; } void pcl::device::OctreeImpl::internalDownload() { int number; DeviceArray<int>(octreeGlobal.nodes_num, 1).download(&number); DeviceArray<int>(octreeGlobal.begs, number).download(host_octree.begs); DeviceArray<int>(octreeGlobal.ends, number).download(host_octree.ends); DeviceArray<int>(octreeGlobal.nodes, number).download(host_octree.nodes); DeviceArray<int>(octreeGlobal.codes, number).download(host_octree.codes); points_sorted.download(host_octree.points_sorted, host_octree.points_sorted_step); indices.download(host_octree.indices); host_octree.downloaded = true; } namespace { int getBitsNum(int interger) { int count = 0; while(interger > 0) { if (interger & 1) ++count; interger>>=1; } return count; } struct OctreeIteratorHost { const static int MAX_LEVELS_PLUS_ROOT = 11; int paths[MAX_LEVELS_PLUS_ROOT]; int level; OctreeIteratorHost() { level = 0; // root level paths[level] = (0 << 8) + 1; } void gotoNextLevel(int first, int len) { ++level; paths[level] = (first << 8) + len; } int operator*() const { return paths[level] >> 8; } void operator++() { while(level >= 0) { int data = paths[level]; if ((data & 0xFF) > 1) // there are another siblings, can goto there { data += (1 << 8) - 1; // +1 to 
first and -1 from len paths[level] = data; break; } else --level; //goto parent; } } }; } void pcl::device::OctreeImpl::radiusSearchHost(const PointType& query, float radius, vector<int>& out, int max_nn) const { out.clear(); float3 center = make_float3(query.x, query.y, query.z); OctreeIteratorHost iterator; while(iterator.level >= 0) { int node_idx = *iterator; int code = host_octree.codes[node_idx]; float3 node_minp = octreeGlobal.minp; float3 node_maxp = octreeGlobal.maxp; calcBoundingBox(iterator.level, code, node_minp, node_maxp); //if true, take nothing, and go to next if (checkIfNodeOutsideSphere(node_minp, node_maxp, center, radius)) { ++iterator; continue; } //if true, take all, and go to next if (checkIfNodeInsideSphere(node_minp, node_maxp, center, radius)) { int beg = host_octree.begs[node_idx]; int end = host_octree.ends[node_idx]; end = beg + min<int>((int)out.size() + end - beg, max_nn) - (int)out.size(); out.insert(out.end(), host_octree.indices.begin() + beg, host_octree.indices.begin() + end); if (out.size() == (size_t)max_nn) return; ++iterator; continue; } // test children int children_mask = host_octree.nodes[node_idx] & 0xFF; bool isLeaf = children_mask == 0; if (isLeaf) { const int beg = host_octree.begs[node_idx]; const int end = host_octree.ends[node_idx]; for(int j = beg; j < end; ++j) { int index = host_octree.indices[j]; float point_x = host_octree.points_sorted[j ]; float point_y = host_octree.points_sorted[j + host_octree.points_sorted_step ]; float point_z = host_octree.points_sorted[j + host_octree.points_sorted_step * 2]; float dx = (point_x - center.x); float dy = (point_y - center.y); float dz = (point_z - center.z); float dist2 = dx * dx + dy * dy + dz * dz; if (dist2 < radius * radius) out.push_back(index); if (out.size() == (size_t)max_nn) return; } ++iterator; continue; } int first = host_octree.nodes[node_idx] >> 8; iterator.gotoNextLevel(first, getBitsNum(children_mask)); } } void pcl::device::OctreeImpl::approxNearestSearchHost(const PointType& query, int& out_index, float& sqr_dist) const { float3 minp = octreeGlobal.minp; float3 maxp = octreeGlobal.maxp; int node_idx = 0; bool out_of_root = query.x < minp.x || query.y < minp.y || query.z < minp.z || query.x > maxp.x || query.y > maxp.y || query.z > maxp.z; if(!out_of_root) { int code = CalcMorton(minp, maxp)(query); int level = 0; for(;;) { int mask_pos = 1 << Morton::extractLevelCode(code, level); int node = host_octree.nodes[node_idx]; int mask = node & 0xFF; if(getBitsNum(mask) == 0) // leaf break; if ( (mask & mask_pos) == 0) // no child break; node_idx = (node >> 8) + getBitsNum(mask & (mask_pos - 1)); ++level; } } int beg = host_octree.begs[node_idx]; int end = host_octree.ends[node_idx]; sqr_dist = std::numeric_limits<float>::max(); for(int i = beg; i < end; ++i) { float point_x = host_octree.points_sorted[i ]; float point_y = host_octree.points_sorted[i + host_octree.points_sorted_step ]; float point_z = host_octree.points_sorted[i + host_octree.points_sorted_step * 2]; float dx = (point_x - query.x); float dy = (point_y - query.y); float dz = (point_z - query.z); float d2 = dx * dx + dy * dy + dz * dz; if (sqr_dist > d2) { sqr_dist = d2; out_index = i; } } out_index = host_octree.indices[out_index]; }
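approxNearestSearchHost above descends the octree by packing, in each node word, an 8-bit child-occupancy mask in the low byte and the index of the first child in the upper bits; the slot of a particular child is the first-child index plus the number of occupied slots below it. A small host-side sketch of that lookup, with made-up values, is:

#include <cstdio>

// Sketch (hypothetical values) of the node layout used above:
// low 8 bits = child-occupancy mask, upper bits = index of the first child.
static int countBits(int v) { int c = 0; while (v > 0) { c += (v & 1); v >>= 1; } return c; }

static int childNodeIndex(int node, int childSlot /* 0..7 */)
{
    int mask     = node & 0xFF;
    int firstIdx = node >> 8;
    int maskPos  = 1 << childSlot;
    if ((mask & maskPos) == 0) return -1;                  // child not present
    return firstIdx + countBits(mask & (maskPos - 1));     // occupied slots below it come first
}

int main()
{
    int node = (42 << 8) | 0b10110010;                               // first child stored at index 42
    std::printf("slot 4 -> node %d\n", childNodeIndex(node, 4));     // 42 + 1 occupied slot below = 43
    std::printf("slot 0 -> node %d\n", childNodeIndex(node, 0));     // not present -> -1
    return 0;
}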
a26e334ab2556d544bb7b2b74ff2847d3d6f52b5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Vector-Matrix multiplication: Y = A * X.
 * Device code.
 */

#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_

#include <stdio.h>
#include "vec_mat_mult.h"

////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata  input data in global memory
//! @param g_odata  output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(float *Ad, float *Xd, float *Yd)
{
    // Multiply A and X
}

#endif // #ifndef _MATRIXMUL_KERNEL_H_
a26e334ab2556d544bb7b2b74ff2847d3d6f52b5.cu
/* Vector-Matrix multiplication: Y = A * X.
 * Device code.
 */

#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_

#include <stdio.h>
#include "vec_mat_mult.h"

////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata  input data in global memory
//! @param g_odata  output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(float *Ad, float *Xd, float *Yd)
{
    // Multiply A and X
}

#endif // #ifndef _MATRIXMUL_KERNEL_H_
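MatrixMulKernel is left as a stub in both files (the body only carries the comment). Purely as an illustration, a naive Y = A * X completion with one thread per output row could look like the sketch below; the explicit dimension parameters and the row-major layout are assumptions, since the original signature carries neither.

// Hypothetical sketch: num_rows/num_cols are assumed parameters, not in the original signature.
__global__ void MatrixVectorMulSketch(const float *Ad, const float *Xd, float *Yd,
                                      int num_rows, int num_cols)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < num_rows) {
        float sum = 0.0f;
        for (int col = 0; col < num_cols; ++col)
            sum += Ad[row * num_cols + col] * Xd[col];   // row-major A
        Yd[row] = sum;
    }
}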
c5c98f02c4a6a40e569941710873e8d0d4291ce9.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/cu_inc/elementwise_impl.cuh" namespace onnxruntime { namespace cuda { template <typename T, typename Tin, bool IsWeighted> struct OpSoftmaxCrossEntropyWeights { OpSoftmaxCrossEntropyWeights(const Tin* label_data, const T* weight_data, Tin C, Tin ignore_index) : label_data_(label_data), weight_data_(weight_data), C_(C), ignore_index_(ignore_index) {} __device__ __inline__ T operator()(CUDA_LONG idx) const { if (label_data_[idx] != ignore_index_) { if (IsWeighted) { CUDA_KERNEL_ASSERT(label_data_[idx] >= 0 && label_data_[idx] < C_); return weight_data_[label_data_[idx]]; } return T(1.f); } return T(0.f); } const Tin* label_data_; const T* weight_data_; Tin C_; Tin ignore_index_; }; template <typename T, typename Tin> void ComputeSoftmaxCrossEntropyWeightsImpl(hipStream_t stream, const Tin* label, const T* weight, size_t count, size_t label_depth, int64_t ignore_index, T* weight_data_nd) { if (weight) { OpSoftmaxCrossEntropyWeights<T, Tin, true> op(label, weight, static_cast<Tin>(label_depth), static_cast<Tin>(ignore_index)); LaunchElementwiseKernel<T, decltype(op)>(stream, weight_data_nd, op, count); } else { OpSoftmaxCrossEntropyWeights<T, Tin, false> op(label, nullptr, static_cast<Tin>(label_depth), static_cast<Tin>(ignore_index)); LaunchElementwiseKernel<T, decltype(op)>(stream, weight_data_nd, op, count); } } #define INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(T, Tin) \ template void ComputeSoftmaxCrossEntropyWeightsImpl(hipStream_t stream, const Tin* label, const T* weight, \ size_t count, size_t label_depth, int64_t ignore_index, \ T* weight_data_nd) INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(float, int32_t); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(float, int64_t); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(half, int64_t); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(BFloat16, int64_t); #undef INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL template <typename T, typename TAcc, typename Tin> struct OpWeightedSoftmaxCrossEntropyLoss { OpWeightedSoftmaxCrossEntropyLoss(const T* log_prob_data, const Tin* label_data, const T* weight_data, const TAcc* normalize_factor_data, Tin C, Tin ignore_index) : log_prob_data_(log_prob_data), label_data_(label_data), weight_data_(weight_data), normalize_factor_data_(normalize_factor_data), C_(C), ignore_index_(ignore_index) {} __device__ __inline__ T operator()(CUDA_LONG idx) const { if (label_data_[idx] != ignore_index_) { CUDA_KERNEL_ASSERT(label_data_[idx] >= 0 && label_data_[idx] < C_); return static_cast<T>(static_cast<TAcc>(-log_prob_data_[idx * C_ + label_data_[idx]] * weight_data_[idx]) / (*normalize_factor_data_)); } return T(0.f); } const T* log_prob_data_; const Tin* label_data_; const T* weight_data_; const TAcc* normalize_factor_data_; Tin C_; Tin ignore_index_; }; template <typename T, typename TAcc, typename Tin> void SoftmaxCrossEntropyLossImpl(hipStream_t stream, const T* log_prob, const Tin* label, const T* weight, const TAcc* normalize_factor, size_t count, size_t label_depth, int64_t ignore_index, T* output_data) { OpWeightedSoftmaxCrossEntropyLoss<T, TAcc, Tin> op(log_prob, label, weight, normalize_factor, static_cast<Tin>(label_depth), static_cast<Tin>(ignore_index)); LaunchElementwiseKernel<T, decltype(op)>(stream, output_data, op, count); } template <typename T, typename TAcc, typename Tin, bool IsReductionNone, bool HasBias> struct 
OpWeightedSoftmaxCrossEntropyLossGrad { OpWeightedSoftmaxCrossEntropyLossGrad(const T* dY_data, const T* log_prob_data, const Tin* label_data, const T* weight_data, const TAcc* normalize_factor_data, const T* bias_data, Tin C) : dY_data_(dY_data), log_prob_data_(log_prob_data), label_data_(label_data), weight_data_(weight_data), normalize_factor_data_(normalize_factor_data), bias_data_(bias_data), C_(C) { C_fdm_ = fast_divmod(static_cast<int>(C)); } __device__ __inline__ T operator()(CUDA_LONG idx) const { // normalize_factor is sum of labels' weights. Because zero sum implies all weights are 0, the loss function should // be constant 0 and its corresponding gradient should be 0 as well. T result = T(0.f); if (*normalize_factor_data_ != TAcc(0.f)) { int row, d; C_fdm_.divmod(idx, row, d); CUDA_KERNEL_ASSERT(weight_data_[row] == T(0.f) || (label_data_[row] >= 0 && label_data_[row] < C_)); result = static_cast<T>(static_cast<TAcc>((IsReductionNone ? dY_data_[row] : *dY_data_) * weight_data_[row]) * (_Exp(static_cast<TAcc>(log_prob_data_[idx])) - (TAcc)(d == label_data_[row])) / (*normalize_factor_data_)); } return HasBias ? result + bias_data_[idx] : result; } const T* dY_data_; const T* log_prob_data_; const Tin* label_data_; const T* weight_data_; const TAcc* normalize_factor_data_; const T* bias_data_; Tin C_; fast_divmod C_fdm_; }; template <typename T, typename TAcc, typename Tin> void SoftmaxCrossEntropyLossGradImpl(hipStream_t stream, const T* dY, const T* log_prob, const Tin* label, const T* weight, const TAcc* normalize_factor, const T* bias_data, size_t count, size_t label_depth, bool reduction_none, T* output_data) { #define LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(is_reduction_none, has_bias) \ OpWeightedSoftmaxCrossEntropyLossGrad<T, TAcc, Tin, is_reduction_none, has_bias> op( \ dY, log_prob, label, weight, normalize_factor, bias_data, static_cast<Tin>(label_depth)); \ LaunchElementwiseKernel<T, decltype(op)>(stream, output_data, op, count * label_depth) if (reduction_none) { if (bias_data) { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(true, true); } else { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(true, false); } } else { if (bias_data) { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(false, true); } else { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(false, false); } } #undef LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL } #define INSTANTIATE_SCE_LOSS_IMPL(T, TAcc, Tin) \ template void SoftmaxCrossEntropyLossImpl(hipStream_t stream, const T* log_prob, const Tin* label, const T* weight, \ const TAcc* normalize_factor, size_t count, size_t label_depth, \ int64_t ignore_index, T* output_data); \ template void SoftmaxCrossEntropyLossGradImpl(hipStream_t stream, const T* dY, const T* log_prob, const Tin* label, \ const T* weight, const TAcc* normalize_factor, const T* bias_data, \ size_t count, size_t label_depth, bool reducation_none, \ T* output_data) INSTANTIATE_SCE_LOSS_IMPL(float, float, int32_t); INSTANTIATE_SCE_LOSS_IMPL(float, float, int64_t); INSTANTIATE_SCE_LOSS_IMPL(half, float, int64_t); INSTANTIATE_SCE_LOSS_IMPL(BFloat16, float, int64_t); #undef INSTANTIATE_SCE_LOSS_IMPL } // namespace cuda } // namespace onnxruntime
c5c98f02c4a6a40e569941710873e8d0d4291ce9.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/cu_inc/elementwise_impl.cuh" namespace onnxruntime { namespace cuda { template <typename T, typename Tin, bool IsWeighted> struct OpSoftmaxCrossEntropyWeights { OpSoftmaxCrossEntropyWeights(const Tin* label_data, const T* weight_data, Tin C, Tin ignore_index) : label_data_(label_data), weight_data_(weight_data), C_(C), ignore_index_(ignore_index) {} __device__ __inline__ T operator()(CUDA_LONG idx) const { if (label_data_[idx] != ignore_index_) { if (IsWeighted) { CUDA_KERNEL_ASSERT(label_data_[idx] >= 0 && label_data_[idx] < C_); return weight_data_[label_data_[idx]]; } return T(1.f); } return T(0.f); } const Tin* label_data_; const T* weight_data_; Tin C_; Tin ignore_index_; }; template <typename T, typename Tin> void ComputeSoftmaxCrossEntropyWeightsImpl(cudaStream_t stream, const Tin* label, const T* weight, size_t count, size_t label_depth, int64_t ignore_index, T* weight_data_nd) { if (weight) { OpSoftmaxCrossEntropyWeights<T, Tin, true> op(label, weight, static_cast<Tin>(label_depth), static_cast<Tin>(ignore_index)); LaunchElementwiseKernel<T, decltype(op)>(stream, weight_data_nd, op, count); } else { OpSoftmaxCrossEntropyWeights<T, Tin, false> op(label, nullptr, static_cast<Tin>(label_depth), static_cast<Tin>(ignore_index)); LaunchElementwiseKernel<T, decltype(op)>(stream, weight_data_nd, op, count); } } #define INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(T, Tin) \ template void ComputeSoftmaxCrossEntropyWeightsImpl(cudaStream_t stream, const Tin* label, const T* weight, \ size_t count, size_t label_depth, int64_t ignore_index, \ T* weight_data_nd) INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(float, int32_t); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(float, int64_t); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(half, int64_t); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(BFloat16, int64_t); #undef INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL template <typename T, typename TAcc, typename Tin> struct OpWeightedSoftmaxCrossEntropyLoss { OpWeightedSoftmaxCrossEntropyLoss(const T* log_prob_data, const Tin* label_data, const T* weight_data, const TAcc* normalize_factor_data, Tin C, Tin ignore_index) : log_prob_data_(log_prob_data), label_data_(label_data), weight_data_(weight_data), normalize_factor_data_(normalize_factor_data), C_(C), ignore_index_(ignore_index) {} __device__ __inline__ T operator()(CUDA_LONG idx) const { if (label_data_[idx] != ignore_index_) { CUDA_KERNEL_ASSERT(label_data_[idx] >= 0 && label_data_[idx] < C_); return static_cast<T>(static_cast<TAcc>(-log_prob_data_[idx * C_ + label_data_[idx]] * weight_data_[idx]) / (*normalize_factor_data_)); } return T(0.f); } const T* log_prob_data_; const Tin* label_data_; const T* weight_data_; const TAcc* normalize_factor_data_; Tin C_; Tin ignore_index_; }; template <typename T, typename TAcc, typename Tin> void SoftmaxCrossEntropyLossImpl(cudaStream_t stream, const T* log_prob, const Tin* label, const T* weight, const TAcc* normalize_factor, size_t count, size_t label_depth, int64_t ignore_index, T* output_data) { OpWeightedSoftmaxCrossEntropyLoss<T, TAcc, Tin> op(log_prob, label, weight, normalize_factor, static_cast<Tin>(label_depth), static_cast<Tin>(ignore_index)); LaunchElementwiseKernel<T, decltype(op)>(stream, output_data, op, count); } template <typename T, typename TAcc, typename Tin, bool IsReductionNone, bool HasBias> struct OpWeightedSoftmaxCrossEntropyLossGrad { 
OpWeightedSoftmaxCrossEntropyLossGrad(const T* dY_data, const T* log_prob_data, const Tin* label_data, const T* weight_data, const TAcc* normalize_factor_data, const T* bias_data, Tin C) : dY_data_(dY_data), log_prob_data_(log_prob_data), label_data_(label_data), weight_data_(weight_data), normalize_factor_data_(normalize_factor_data), bias_data_(bias_data), C_(C) { C_fdm_ = fast_divmod(static_cast<int>(C)); } __device__ __inline__ T operator()(CUDA_LONG idx) const { // normalize_factor is sum of labels' weights. Because zero sum implies all weights are 0, the loss function should // be constant 0 and its corresponding gradient should be 0 as well. T result = T(0.f); if (*normalize_factor_data_ != TAcc(0.f)) { int row, d; C_fdm_.divmod(idx, row, d); CUDA_KERNEL_ASSERT(weight_data_[row] == T(0.f) || (label_data_[row] >= 0 && label_data_[row] < C_)); result = static_cast<T>(static_cast<TAcc>((IsReductionNone ? dY_data_[row] : *dY_data_) * weight_data_[row]) * (_Exp(static_cast<TAcc>(log_prob_data_[idx])) - (TAcc)(d == label_data_[row])) / (*normalize_factor_data_)); } return HasBias ? result + bias_data_[idx] : result; } const T* dY_data_; const T* log_prob_data_; const Tin* label_data_; const T* weight_data_; const TAcc* normalize_factor_data_; const T* bias_data_; Tin C_; fast_divmod C_fdm_; }; template <typename T, typename TAcc, typename Tin> void SoftmaxCrossEntropyLossGradImpl(cudaStream_t stream, const T* dY, const T* log_prob, const Tin* label, const T* weight, const TAcc* normalize_factor, const T* bias_data, size_t count, size_t label_depth, bool reduction_none, T* output_data) { #define LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(is_reduction_none, has_bias) \ OpWeightedSoftmaxCrossEntropyLossGrad<T, TAcc, Tin, is_reduction_none, has_bias> op( \ dY, log_prob, label, weight, normalize_factor, bias_data, static_cast<Tin>(label_depth)); \ LaunchElementwiseKernel<T, decltype(op)>(stream, output_data, op, count * label_depth) if (reduction_none) { if (bias_data) { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(true, true); } else { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(true, false); } } else { if (bias_data) { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(false, true); } else { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(false, false); } } #undef LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL } #define INSTANTIATE_SCE_LOSS_IMPL(T, TAcc, Tin) \ template void SoftmaxCrossEntropyLossImpl(cudaStream_t stream, const T* log_prob, const Tin* label, const T* weight, \ const TAcc* normalize_factor, size_t count, size_t label_depth, \ int64_t ignore_index, T* output_data); \ template void SoftmaxCrossEntropyLossGradImpl(cudaStream_t stream, const T* dY, const T* log_prob, const Tin* label, \ const T* weight, const TAcc* normalize_factor, const T* bias_data, \ size_t count, size_t label_depth, bool reducation_none, \ T* output_data) INSTANTIATE_SCE_LOSS_IMPL(float, float, int32_t); INSTANTIATE_SCE_LOSS_IMPL(float, float, int64_t); INSTANTIATE_SCE_LOSS_IMPL(half, float, int64_t); INSTANTIATE_SCE_LOSS_IMPL(BFloat16, float, int64_t); #undef INSTANTIATE_SCE_LOSS_IMPL } // namespace cuda } // namespace onnxruntime
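Per sample, OpWeightedSoftmaxCrossEntropyLoss evaluates -log_prob[i, label_i] * weight[i] / normalize_factor and returns zero for ignored labels. A plain CPU reference of the same formula, useful for checking the kernel and written with hypothetical shapes, is:

#include <cstdint>
#include <vector>

// CPU reference sketch (hypothetical shapes): log_prob is count x label_depth, row-major;
// weight holds one per-sample weight, as produced by ComputeSoftmaxCrossEntropyWeightsImpl.
std::vector<float> SoftmaxCrossEntropyLossRef(const std::vector<float>& log_prob,
                                              const std::vector<int64_t>& label,
                                              const std::vector<float>& weight,
                                              float normalize_factor,
                                              size_t label_depth,
                                              int64_t ignore_index)
{
    std::vector<float> out(label.size(), 0.0f);
    for (size_t i = 0; i < label.size(); ++i) {
        if (label[i] == ignore_index) continue;    // ignored label -> loss contribution is 0
        out[i] = -log_prob[i * label_depth + static_cast<size_t>(label[i])]
                 * weight[i] / normalize_factor;
    }
    return out;
}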
374a4b746d87cfb129e9ab0cd8128ede8dd8094f.hip
// !!! This is a file automatically generated by hipify!!! // // Wrapper for cublasSTCgemm function. // // Sam Hatfield, ECMWF // Alan Gray, NVIDIA // #include <stdio.h> #include "rocblas.h" bool alreadyAllocated_stcgemm = false; bool alreadyAllocated_stcgemm_handle = false; half **d_Aarray_stcgemm; half **d_Barray_stcgemm; float **d_Carray_stcgemm; half **Aarray_stcgemm; half **Barray_stcgemm; float **Carray_stcgemm; hipblasHandle_t handle_stcgemm; extern "C" void cublasSTCgemmBatched_wrapper( char transa, char transb, int m, int n, int k, float alpha, const half *A, int lda, int tda, const half *B, int ldb, int tdb, float beta, float *C, int ldc, int tdc, int batchCount ){ // Define CUBLAS operation handles hipblasOperation_t op_t1, op_t2; // Decide whether to transpose matrices or not op_t1 = (transa == 'T' || transa == 't') ? HIPBLAS_OP_T : HIPBLAS_OP_N; op_t2 = (transb == 'T' || transb == 't') ? HIPBLAS_OP_T : HIPBLAS_OP_N; // Initialize CUBLAS handle if (!alreadyAllocated_stcgemm_handle) { hipblasCreate(&handle_stcgemm); alreadyAllocated_stcgemm_handle = true; } // Allocate host arrays if (!alreadyAllocated_stcgemm) { hipHostMalloc(&Aarray_stcgemm, batchCount*sizeof(half*)); hipHostMalloc(&Barray_stcgemm, batchCount*sizeof(half*)); hipHostMalloc(&Carray_stcgemm, batchCount*sizeof(float*)); alreadyAllocated_stcgemm = true; } // Allocate device arrays hipMalloc(&d_Aarray_stcgemm, batchCount*sizeof(half*)); hipMalloc(&d_Barray_stcgemm, batchCount*sizeof(half*)); hipMalloc(&d_Carray_stcgemm, batchCount*sizeof(float*)); // Transfer data from input arrays to host arrays for (int i = 0; i < batchCount; i++) { Aarray_stcgemm[i] = (half*) &(A[i*lda*tda]); Barray_stcgemm[i] = (half*) &(B[i*ldb*tdb]); Carray_stcgemm[i] = (float*) &(C[i*ldc*tdc]); } // Transfer data from host arrays to device arrays hipMemcpy(d_Aarray_stcgemm, Aarray_stcgemm, batchCount*sizeof(half*), hipMemcpyHostToDevice); hipMemcpy(d_Barray_stcgemm, Barray_stcgemm, batchCount*sizeof(half*), hipMemcpyHostToDevice); hipMemcpy(d_Carray_stcgemm, Carray_stcgemm, batchCount*sizeof(float*), hipMemcpyHostToDevice); // Perform batched SGEMM hipblasGemmBatchedEx(handle_stcgemm, op_t1, op_t2, m, n, k, (const void*)&alpha, (const void**)d_Aarray_stcgemm, HIP_R_16F, lda, (const void**)d_Barray_stcgemm, HIP_R_16F, ldb, (const void*)&beta, (void**)d_Carray_stcgemm, HIP_R_32F, ldc, batchCount, CUBLAS_COMPUTE_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP ); hipDeviceSynchronize(); // Free device arrays hipFree(d_Aarray_stcgemm); hipFree(d_Barray_stcgemm); hipFree(d_Carray_stcgemm); } extern "C" void cublasSTCgemmBatched_finalize() { if (alreadyAllocated_stcgemm) { hipFree(Aarray_stcgemm); hipFree(Barray_stcgemm); hipFree(Carray_stcgemm); hipFree(d_Aarray_stcgemm); hipFree(d_Barray_stcgemm); hipFree(d_Carray_stcgemm); } if (alreadyAllocated_stcgemm_handle) { hipblasDestroy(handle_stcgemm); } }
374a4b746d87cfb129e9ab0cd8128ede8dd8094f.cu
// // Wrapper for cublasSTCgemm function. // // Sam Hatfield, ECMWF // Alan Gray, NVIDIA // #include <stdio.h> #include "cublas_v2.h" bool alreadyAllocated_stcgemm = false; bool alreadyAllocated_stcgemm_handle = false; half **d_Aarray_stcgemm; half **d_Barray_stcgemm; float **d_Carray_stcgemm; half **Aarray_stcgemm; half **Barray_stcgemm; float **Carray_stcgemm; cublasHandle_t handle_stcgemm; extern "C" void cublasSTCgemmBatched_wrapper( char transa, char transb, int m, int n, int k, float alpha, const half *A, int lda, int tda, const half *B, int ldb, int tdb, float beta, float *C, int ldc, int tdc, int batchCount ){ // Define CUBLAS operation handles cublasOperation_t op_t1, op_t2; // Decide whether to transpose matrices or not op_t1 = (transa == 'T' || transa == 't') ? CUBLAS_OP_T : CUBLAS_OP_N; op_t2 = (transb == 'T' || transb == 't') ? CUBLAS_OP_T : CUBLAS_OP_N; // Initialize CUBLAS handle if (!alreadyAllocated_stcgemm_handle) { cublasCreate(&handle_stcgemm); alreadyAllocated_stcgemm_handle = true; } // Allocate host arrays if (!alreadyAllocated_stcgemm) { cudaMallocHost(&Aarray_stcgemm, batchCount*sizeof(half*)); cudaMallocHost(&Barray_stcgemm, batchCount*sizeof(half*)); cudaMallocHost(&Carray_stcgemm, batchCount*sizeof(float*)); alreadyAllocated_stcgemm = true; } // Allocate device arrays cudaMalloc(&d_Aarray_stcgemm, batchCount*sizeof(half*)); cudaMalloc(&d_Barray_stcgemm, batchCount*sizeof(half*)); cudaMalloc(&d_Carray_stcgemm, batchCount*sizeof(float*)); // Transfer data from input arrays to host arrays for (int i = 0; i < batchCount; i++) { Aarray_stcgemm[i] = (half*) &(A[i*lda*tda]); Barray_stcgemm[i] = (half*) &(B[i*ldb*tdb]); Carray_stcgemm[i] = (float*) &(C[i*ldc*tdc]); } // Transfer data from host arrays to device arrays cudaMemcpy(d_Aarray_stcgemm, Aarray_stcgemm, batchCount*sizeof(half*), cudaMemcpyHostToDevice); cudaMemcpy(d_Barray_stcgemm, Barray_stcgemm, batchCount*sizeof(half*), cudaMemcpyHostToDevice); cudaMemcpy(d_Carray_stcgemm, Carray_stcgemm, batchCount*sizeof(float*), cudaMemcpyHostToDevice); // Perform batched SGEMM cublasGemmBatchedEx(handle_stcgemm, op_t1, op_t2, m, n, k, (const void*)&alpha, (const void**)d_Aarray_stcgemm, CUDA_R_16F, lda, (const void**)d_Barray_stcgemm, CUDA_R_16F, ldb, (const void*)&beta, (void**)d_Carray_stcgemm, CUDA_R_32F, ldc, batchCount, CUBLAS_COMPUTE_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP ); cudaDeviceSynchronize(); // Free device arrays cudaFree(d_Aarray_stcgemm); cudaFree(d_Barray_stcgemm); cudaFree(d_Carray_stcgemm); } extern "C" void cublasSTCgemmBatched_finalize() { if (alreadyAllocated_stcgemm) { cudaFree(Aarray_stcgemm); cudaFree(Barray_stcgemm); cudaFree(Carray_stcgemm); cudaFree(d_Aarray_stcgemm); cudaFree(d_Barray_stcgemm); cudaFree(d_Carray_stcgemm); } if (alreadyAllocated_stcgemm_handle) { cublasDestroy(handle_stcgemm); } }
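The wrapper above expects A, B and C to already be device buffers; it builds per-matrix pointer arrays at offsets i*lda*tda (and likewise for B and C) before calling cublasGemmBatchedEx. A hypothetical caller for a batch of square m x m matrices, assuming lda/tda are the leading dimension and per-matrix column count, might look like:

#include <cuda_runtime.h>
#include <cuda_fp16.h>

extern "C" void cublasSTCgemmBatched_wrapper(char, char, int, int, int, float,
                                             const half*, int, int,
                                             const half*, int, int,
                                             float, float*, int, int, int);
extern "C" void cublasSTCgemmBatched_finalize();

// Hypothetical usage sketch: C_i = A_i * B_i for batchCount square m x m matrices
// already resident on the device (d_A, d_B in half precision, d_C in single).
void runBatchedGemm(const half* d_A, const half* d_B, float* d_C, int m, int batchCount)
{
    cublasSTCgemmBatched_wrapper('N', 'N', m, m, m,
                                 1.0f,
                                 d_A, m, m,    // lda, tda: leading dim and columns per matrix
                                 d_B, m, m,
                                 0.0f,
                                 d_C, m, m,
                                 batchCount);
    cublasSTCgemmBatched_finalize();   // typically called once at shutdown
}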
deaba48f6390904a9f196318847eca766ee73c53.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifdef PADDLE_WITH_CUDA #include <hip/hip_runtime.h> #endif #ifdef PADDLE_WITH_HIP #include <hip/hip_runtime.h> #endif #include "gtest/gtest.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/place.h" namespace paddle { namespace memory { __global__ void write_kernel(int* data, uint64_t n, uint64_t step) { int thread_num = gridDim.x * blockDim.x; int thread_id = blockIdx.x * blockDim.x + threadIdx.x; for (uint64_t i = thread_id; i * step < n; i += thread_num) { *(data + i * step) = 1; } } __global__ void sum_kernel(int* data, uint64_t n, uint64_t step, int* sum) { int thread_num = gridDim.x * blockDim.x; int thread_id = blockIdx.x * blockDim.x + threadIdx.x; for (uint64_t i = thread_id; i * step < n; i += thread_num) { atomicAdd(sum, *(data + i * step)); } } TEST(ManagedMemoryTest, H2DTest) { if (!platform::IsGPUManagedMemorySupported(0)) { return; } uint64_t n_data = 1024; uint64_t step = 1; allocation::AllocationPtr allocation = Alloc(platform::CUDAPlace(0), n_data * sizeof(int)); int* data = static_cast<int*>(allocation->ptr()); memset(data, 0, n_data * sizeof(int)); // located on host memory hipLaunchKernelGGL(( write_kernel), dim3(1), dim3(1024), 0, 0, data, n_data, step); // trans to device memory #ifdef PADDLE_WITH_CUDA PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); #else PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); #endif int sum = 0; for (uint64_t i = 0; i < n_data; ++i) { sum += *(data + i); } EXPECT_EQ(sum, n_data / step); allocation = nullptr; } TEST(ManagedMemoryTest, D2HTest) { if (!platform::IsGPUManagedMemorySupported(0)) { return; } uint64_t n_data = 1024; uint64_t step = 1; AllocationPtr allocation = Alloc(platform::CUDAPlace(0), n_data * sizeof(int)); int* data = static_cast<int*>(allocation->ptr()); hipLaunchKernelGGL(( write_kernel), dim3(1), dim3(1024), 0, 0, data, n_data, step); // located on device memory #ifdef PADDLE_WITH_CUDA PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); #else PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); #endif memset(data, 0, n_data * sizeof(int)); // trans to host memory int sum = 0; for (uint64_t i = 0; i < n_data; ++i) { sum += *(data + i); } EXPECT_EQ(sum, 0); } TEST(ManagedMemoryTest, OversubscribeGPUMemoryTest) { if (!platform::IsGPUManagedMemoryOversubscriptionSupported(0)) { return; } uint64_t available_mem = platform::GpuAvailableMemToAlloc(); uint64_t n_data = available_mem * 2 / sizeof(int) + 1; // requires more than 2 * available_mem bytes uint64_t step = 1024; AllocationPtr data_allocation = Alloc(platform::CUDAPlace(0), n_data * sizeof(int)); AllocationPtr sum_allocation = Alloc(platform::CUDAPlace(0), sizeof(int)); int* data = static_cast<int*>(data_allocation->ptr()); int* sum = static_cast<int*>(sum_allocation->ptr()); (*sum) = 0; hipLaunchKernelGGL(( 
write_kernel), dim3(5120), dim3(1024), 0, 0, data, n_data, step); hipLaunchKernelGGL(( sum_kernel), dim3(5120), dim3(1024), 0, 0, data, n_data, step, sum); #ifdef PADDLE_WITH_CUDA PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); #else PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); #endif EXPECT_EQ(*sum, (n_data + step - 1) / step); } TEST(ManagedMemoryTest, OOMExceptionTest) { if (!platform::IsGPUManagedMemorySupported(0)) { return; } EXPECT_THROW(Alloc(platform::CUDAPlace(0), size_t(1) << 60), memory::allocation::BadAlloc); } } // namespace memory } // namespace paddle
deaba48f6390904a9f196318847eca766ee73c53.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifdef PADDLE_WITH_CUDA #include <cuda_runtime.h> #endif #ifdef PADDLE_WITH_HIP #include <hip/hip_runtime.h> #endif #include "gtest/gtest.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/place.h" namespace paddle { namespace memory { __global__ void write_kernel(int* data, uint64_t n, uint64_t step) { int thread_num = gridDim.x * blockDim.x; int thread_id = blockIdx.x * blockDim.x + threadIdx.x; for (uint64_t i = thread_id; i * step < n; i += thread_num) { *(data + i * step) = 1; } } __global__ void sum_kernel(int* data, uint64_t n, uint64_t step, int* sum) { int thread_num = gridDim.x * blockDim.x; int thread_id = blockIdx.x * blockDim.x + threadIdx.x; for (uint64_t i = thread_id; i * step < n; i += thread_num) { atomicAdd(sum, *(data + i * step)); } } TEST(ManagedMemoryTest, H2DTest) { if (!platform::IsGPUManagedMemorySupported(0)) { return; } uint64_t n_data = 1024; uint64_t step = 1; allocation::AllocationPtr allocation = Alloc(platform::CUDAPlace(0), n_data * sizeof(int)); int* data = static_cast<int*>(allocation->ptr()); memset(data, 0, n_data * sizeof(int)); // located on host memory write_kernel<<<1, 1024>>>(data, n_data, step); // trans to device memory #ifdef PADDLE_WITH_CUDA PADDLE_ENFORCE_GPU_SUCCESS(cudaDeviceSynchronize()); #else PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); #endif int sum = 0; for (uint64_t i = 0; i < n_data; ++i) { sum += *(data + i); } EXPECT_EQ(sum, n_data / step); allocation = nullptr; } TEST(ManagedMemoryTest, D2HTest) { if (!platform::IsGPUManagedMemorySupported(0)) { return; } uint64_t n_data = 1024; uint64_t step = 1; AllocationPtr allocation = Alloc(platform::CUDAPlace(0), n_data * sizeof(int)); int* data = static_cast<int*>(allocation->ptr()); write_kernel<<<1, 1024>>>(data, n_data, step); // located on device memory #ifdef PADDLE_WITH_CUDA PADDLE_ENFORCE_GPU_SUCCESS(cudaDeviceSynchronize()); #else PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); #endif memset(data, 0, n_data * sizeof(int)); // trans to host memory int sum = 0; for (uint64_t i = 0; i < n_data; ++i) { sum += *(data + i); } EXPECT_EQ(sum, 0); } TEST(ManagedMemoryTest, OversubscribeGPUMemoryTest) { if (!platform::IsGPUManagedMemoryOversubscriptionSupported(0)) { return; } uint64_t available_mem = platform::GpuAvailableMemToAlloc(); uint64_t n_data = available_mem * 2 / sizeof(int) + 1; // requires more than 2 * available_mem bytes uint64_t step = 1024; AllocationPtr data_allocation = Alloc(platform::CUDAPlace(0), n_data * sizeof(int)); AllocationPtr sum_allocation = Alloc(platform::CUDAPlace(0), sizeof(int)); int* data = static_cast<int*>(data_allocation->ptr()); int* sum = static_cast<int*>(sum_allocation->ptr()); (*sum) = 0; write_kernel<<<5120, 1024>>>(data, n_data, step); sum_kernel<<<5120, 1024>>>(data, n_data, step, sum); #ifdef PADDLE_WITH_CUDA 
PADDLE_ENFORCE_GPU_SUCCESS(cudaDeviceSynchronize()); #else PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); #endif EXPECT_EQ(*sum, (n_data + step - 1) / step); } TEST(ManagedMemoryTest, OOMExceptionTest) { if (!platform::IsGPUManagedMemorySupported(0)) { return; } EXPECT_THROW(Alloc(platform::CUDAPlace(0), size_t(1) << 60), memory::allocation::BadAlloc); } } // namespace memory } // namespace paddle
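The H2D and D2H tests above depend on Paddle's Alloc returning memory that the same pointer can touch from both host code (memset, the summation loop) and device code (write_kernel). A standalone sketch of that round trip using cudaMallocManaged, without the Paddle allocator (a toy illustration, not Paddle code):

// Sketch of the managed-memory round trip the tests exercise:
// write on host, increment on device, read on host through one pointer.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void increment(int* data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] += 1;
}

int main() {
  const int n = 1024;
  int* data = nullptr;
  cudaMallocManaged((void**)&data, n * sizeof(int)); // visible to host and device
  for (int i = 0; i < n; ++i) data[i] = 1;           // host write
  increment<<<(n + 255) / 256, 256>>>(data, n);      // device access migrates pages
  cudaDeviceSynchronize();                           // required before the host reads again
  std::printf("data[0]=%d data[n-1]=%d\n", data[0], data[n - 1]); // expect 2 2
  cudaFree(data);
  return 0;
}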
9d4e2be37b616f06839efab72a4ffaf8947bf627.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda_runtime.h> #include<stdio.h> #include<math.h> /* * multiply vectors componentwise * author: bjr * date: nov 2017 * last update: jan 2019 * 30 aug 2022 -bjr: update for csc596 (231) */ #ifndef N_ELEM #define N_ELEM 32 #endif // cuda kernel __global__ void prod_array(float * a, float *b, float * c) { int i = threadIdx.x + blockIdx.x * blockDim.x ; a[i] = b[i] * c[i] ; return ; } // host routines void initialData(float *ip, int size) { time_t t ; int i ; static int j = 0 ; if (!j++) srand ((unsigned)time(&t)) ; for (i=0; i<size; i++) { ip[i] = (float) ( rand() & 0xFF ) / 10.0f ; } return ; } #define PRINT_I 6 #define PRINT_L 2 void printData(const char * s, float *ip, int n) { int i, k ; int f = PRINT_I ; int l = PRINT_L ; printf("%s\t",s) ; if (n<=f) { for (i=0;i<n;i++) { printf("%5.2f\t", ip[i]) ; } printf("\n") ; return ; } for (i=0;i<f;i++) { printf("%5.2f\t", ip[i]) ; } printf("\t...\t") ; k = n - l ; if (k<f) k = f ; for (i=k;i<n;i++) { printf("%5.2f\t", ip[i]) ; } printf("\n") ; return ; } float distance(float * a, float * b, float *c, int n) { float f, dist = 0.0 ; int i ; for (i=0;i<n;i++) { f = b[i] * c[i] - a[i] ; dist += f*f ; } return sqrt(dist) ; } int main(int argc, char * argv[]) { int dev = 0 ; int n = N_ELEM ; int n_bytes = n * sizeof(float) ; float * h_a, * h_b, * h_c ; float * d_a, * d_b, * d_c ; hipSetDevice(dev) ; h_a = (float *) malloc(n_bytes) ; h_b = (float *) malloc(n_bytes) ; h_c = (float *) malloc(n_bytes) ; hipMalloc((float **)&d_a, n_bytes) ; hipMalloc((float **)&d_b, n_bytes) ; hipMalloc((float **)&d_c, n_bytes) ; initialData(h_b, n ) ; initialData(h_c, n ) ; // send data to cuda device hipMemcpy(d_b, h_b, n_bytes, hipMemcpyHostToDevice) ; hipMemcpy(d_c, h_c, n_bytes, hipMemcpyHostToDevice) ; hipLaunchKernelGGL(( prod_array) , dim3(1),dim3(n), 0, 0, d_a, d_b, d_c ) ; hipMemcpy(h_a, d_a, n_bytes, hipMemcpyDeviceToHost) ; printf("n = %d\n", n) ; printData("b =\n ", h_b,n) ; printData("c =\n ", h_c,n) ; printData("product =\n ",h_a,n) ; printf("error = %f\n", distance(h_a,h_b,h_c,n) ) ; hipFree(d_a) ; hipFree(d_b) ; hipFree(d_c) ; free(h_a) ; free(h_b) ; free(h_c) ; return 0 ; }
9d4e2be37b616f06839efab72a4ffaf8947bf627.cu
#include<cuda_runtime.h> #include<stdio.h> #include<math.h> /* * multiply vectors componentwise * author: bjr * date: nov 2017 * last update: jan 2019 * 30 aug 2022 -bjr: update for csc596 (231) */ #ifndef N_ELEM #define N_ELEM 32 #endif // cuda kernel __global__ void prod_array(float * a, float *b, float * c) { int i = threadIdx.x + blockIdx.x * blockDim.x ; a[i] = b[i] * c[i] ; return ; } // host routines void initialData(float *ip, int size) { time_t t ; int i ; static int j = 0 ; if (!j++) srand ((unsigned)time(&t)) ; for (i=0; i<size; i++) { ip[i] = (float) ( rand() & 0xFF ) / 10.0f ; } return ; } #define PRINT_I 6 #define PRINT_L 2 void printData(const char * s, float *ip, int n) { int i, k ; int f = PRINT_I ; int l = PRINT_L ; printf("%s\t",s) ; if (n<=f) { for (i=0;i<n;i++) { printf("%5.2f\t", ip[i]) ; } printf("\n") ; return ; } for (i=0;i<f;i++) { printf("%5.2f\t", ip[i]) ; } printf("\t...\t") ; k = n - l ; if (k<f) k = f ; for (i=k;i<n;i++) { printf("%5.2f\t", ip[i]) ; } printf("\n") ; return ; } float distance(float * a, float * b, float *c, int n) { float f, dist = 0.0 ; int i ; for (i=0;i<n;i++) { f = b[i] * c[i] - a[i] ; dist += f*f ; } return sqrt(dist) ; } int main(int argc, char * argv[]) { int dev = 0 ; int n = N_ELEM ; int n_bytes = n * sizeof(float) ; float * h_a, * h_b, * h_c ; float * d_a, * d_b, * d_c ; cudaSetDevice(dev) ; h_a = (float *) malloc(n_bytes) ; h_b = (float *) malloc(n_bytes) ; h_c = (float *) malloc(n_bytes) ; cudaMalloc((float **)&d_a, n_bytes) ; cudaMalloc((float **)&d_b, n_bytes) ; cudaMalloc((float **)&d_c, n_bytes) ; initialData(h_b, n ) ; initialData(h_c, n ) ; // send data to cuda device cudaMemcpy(d_b, h_b, n_bytes, cudaMemcpyHostToDevice) ; cudaMemcpy(d_c, h_c, n_bytes, cudaMemcpyHostToDevice) ; prod_array <<<1,n>>> ( d_a, d_b, d_c ) ; cudaMemcpy(h_a, d_a, n_bytes, cudaMemcpyDeviceToHost) ; printf("n = %d\n", n) ; printData("b =\n ", h_b,n) ; printData("c =\n ", h_c,n) ; printData("product =\n ",h_a,n) ; printf("error = %f\n", distance(h_a,h_b,h_c,n) ) ; cudaFree(d_a) ; cudaFree(d_b) ; cudaFree(d_c) ; free(h_a) ; free(h_b) ; free(h_c) ; return 0 ; }
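The host code above calls cudaSetDevice, cudaMalloc and cudaMemcpy and launches the kernel without checking any return codes. A small hedged sketch of a return-code wrapper that could be dropped around those calls (the CHECK_CUDA name is illustrative, not part of the original program):

// Sketch of an error-check macro for the CUDA runtime calls used above.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                              \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      std::fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,         \
                   cudaGetErrorString(err_));                         \
      std::exit(EXIT_FAILURE);                                        \
    }                                                                 \
  } while (0)

int main() {
  float* d_a = nullptr;
  CHECK_CUDA(cudaMalloc((void**)&d_a, 32 * sizeof(float)));
  CHECK_CUDA(cudaGetLastError());      // catches kernel launch-configuration errors
  CHECK_CUDA(cudaDeviceSynchronize()); // catches asynchronous execution errors
  CHECK_CUDA(cudaFree(d_a));
  return 0;
}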
03e66c00297633aa378e7fb7e8f76027956814b4.hip
// !!! This is a file automatically generated by hipify!!! /* * The MIT License * * Copyright (c) 1997-2017 The University of Utah * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include <random> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <MersenneTwister.h> #define BLKWIDTH 32 //______________________________________________________________________ // // // The following compares the random number generation on the CPU vs GPU // // //______________________________________________________________________ //______________________________________________________________________ // inline int RoundUp(double d) { if(d>=0){ if((d-(int)d) == 0){ return (int)d; } else{ return (int)(d+1); } } else { return (int)d; } } //______________________________________________________________________ // void stopwatch( std::string message, time_t start) { double secs; time_t stop; /* timing variables */ stop = time(nullptr); secs = difftime(stop, start); fprintf(stdout," %.f [s] %s \n",secs, message.c_str()); } //______________________________________________________________________ // CPU based random number generations void randCPU( double *M, int nRandNums) { unsigned int size = nRandNums; unsigned int Imem_size = sizeof(unsigned int) * size; unsigned int Dmem_size = sizeof(double) * size; int* org_randInt = (int*)malloc(Imem_size); int* new_randInt = (int*)malloc(Imem_size); double* org_randDbl = (double*)malloc(Dmem_size); double* new_randDbl = (double*)malloc(Dmem_size); //__________________________________ // Orginal implementation MTRand mTwister; for (int i = 0; i< nRandNums; i++){ mTwister.seed(i); org_randDbl[i] = mTwister.rand(); org_randInt[i] = mTwister.randInt(); } //__________________________________ // C++11 std::mt19937 mTwist; std::uniform_real_distribution<double> D_dist(0.0,1.0); std::uniform_int_distribution<int> I_dist; // mTwist.seed(1234ULL); for (int i = 0; i< nRandNums; i++){ new_randDbl[i] = D_dist( mTwist ); new_randInt[i] = I_dist( mTwist ); } for (int i = 0; i< nRandNums; i++){ M[i] = new_randDbl[i]; } for (int i = 0; i< nRandNums; i++){ printf( "%i org_randDbl: %g new_randDbl: %g org_randInt: %i, new_randInt: %i\n",i, org_randDbl[i], new_randDbl[i], org_randInt[i], new_randInt[i]); } free( org_randInt ); free( new_randInt ); free( org_randDbl ); free( new_randDbl ); } 
//______________________________________________________________________ // Determine device properties void deviceProperties( int &maxThreadsPerBlock ) { // Number of CUDA devices int devCount; hipGetDeviceCount(&devCount); // Iterate through devices for (int deviceNum = 0; deviceNum < devCount; ++deviceNum){ hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, deviceNum); // printDevProp(deviceProp); maxThreadsPerBlock = deviceProp.maxThreadsPerBlock; } } //______________________________________________________________________ // This is the host side random number generation using cuda void randGPU_V1( double *M, int nRandNums) { int size = nRandNums* sizeof(double); double* Md; //__________________________________ // allocate device memory and copy memory to the device hipMalloc( (void**)&Md, size); hipMemcpy( Md, M, size, hipMemcpyHostToDevice ); //__________________________________ // Create pseudo-random number generator // set the seed // generate the numbers hiprandGenerator_t randGen; // hiprandCreateGenerator(&randGen, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandCreateGenerator(&randGen, HIPRAND_RNG_PSEUDO_MT19937); hiprandSetPseudoRandomGeneratorSeed(randGen, 1234ULL); hiprandGenerateUniformDouble(randGen, Md, nRandNums); //__________________________________ // copy from device memory and free device matrices hipMemcpy( M, Md, size, hipMemcpyDeviceToHost ); hipFree( Md ); hiprandDestroyGenerator(randGen); } //______________________________________________________________________ // Returns an random number __device__ double randDevice(hiprandState_t* globalState, const int tid) { hiprandState_t localState = globalState[tid]; double val = hiprand(&localState); globalState[tid] = localState; return (double)val * (1.0/4294967295.0); } //______________________________________________________________________ // Returns an random number excluding 0 & 1.0. 
See MersenneTwister.h // __device__ double randDblExcDevice(hiprandState_t* globalState, const int tid) { hiprandState_t localState = globalState[tid]; double val = hiprand(&localState); globalState[tid] = localState; return ( double(val) + 0.5 ) * (1.0/4294967296.0); } //______________________________________________________________________ // __global__ void setup_kernel(hiprandState_t* randNumStates) { int tID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; /* Each thread gets same seed, a different sequence number, no offset */ hiprand_init(1234, tID, 0, &randNumStates[tID]); } //______________________________________________________________________ // Kernel: __global__ void randNumKernel( hiprandState_t* randNumStates, double* M, double* N, int nRandNums ) { int tID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; // for (int k = 0; k < nRandNums; ++k){ M[tID] = randDblExcDevice( randNumStates, tID); N[tID] = randDevice( randNumStates, tID ); // } } //______________________________________________________________________ // Device side random number generator void randGPU_V2( double *M, double *N,int nRandNums) { int size = nRandNums* sizeof(double); double* Md; double* Nd; //__________________________________ // allocate device memory and copy memory to the device hipMalloc( (void**)&Md, size); hipMalloc( (void**)&Nd, size); //__________________________________ // copy host memory -> device hipMemcpy( Md, M, size, hipMemcpyHostToDevice ); hipMemcpy( Nd, N, size, hipMemcpyHostToDevice ); //__________________________________ // int maxThreadsPerBlock = 0; deviceProperties( maxThreadsPerBlock ); int xMaxThreadsPerBlock = BLKWIDTH; int yMaxThreadsPerBlock = BLKWIDTH; maxThreadsPerBlock = xMaxThreadsPerBlock * yMaxThreadsPerBlock; // hardwired for now int threadsPerBlock = min(maxThreadsPerBlock, nRandNums); int xBlocks = 0; int yBlocks = 0; if( nRandNums > maxThreadsPerBlock){ int nBlocks = RoundUp( nRandNums/sqrt(maxThreadsPerBlock) ); xBlocks = RoundUp( nRandNums/xMaxThreadsPerBlock ); yBlocks = RoundUp( nRandNums/yMaxThreadsPerBlock ); }else{ xBlocks = 1; // if matrix is smaller than 1 block yBlocks = 1; } int nBlocks = xBlocks = yBlocks; // Assumption that int me = xBlocks * yBlocks * threadsPerBlock; fprintf(stdout, " xBlocks: %d, yBlocks: %d, nRandNums: %d BLKWIDTH: %d, threadsPerBlock %d ",xBlocks, yBlocks, nRandNums, BLKWIDTH, threadsPerBlock); fprintf(stdout, " number of threads: %d\n",me); //__________________________________ // Kernel invocation dim3 dimBlock(BLKWIDTH, BLKWIDTH, 1); dim3 dimGrid( xBlocks, yBlocks, 1); // setup random number generator states on the device, 1 for each thread hiprandState_t* randNumStates; int numStates = dimGrid.x * dimGrid.y * dimBlock.x * dimBlock.y * dimBlock.z; hipMalloc((void**)&randNumStates, numStates * sizeof(hiprandState_t)); //__________________________________ // Global Memory Kernel time_t start = time(nullptr); hipLaunchKernelGGL(( setup_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, randNumStates ); stopwatch(" randDeviceGPU setup_kernel: ", start); start = time(nullptr); hipLaunchKernelGGL(( randNumKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, randNumStates, Md, Nd, nRandNums ); stopwatch(" randDeviceGPU randNumKernel: ", start); //__________________________________ // copy from device memory and free memory start = time(nullptr); hipMemcpy( M, Md, size, hipMemcpyDeviceToHost ); hipMemcpy( N, Nd, size, hipMemcpyDeviceToHost ); stopwatch(" randDeviceGPU memcopy: 
", start); start = time(nullptr); hipFree( Md ); hipFree( Nd ); hipFree(randNumStates) ; stopwatch(" randDeviceGPU free memory: ", start); } //______________________________________________________________________ int main( int argc, char** argv) { FILE *fp; fp = fopen("randomNumbers.dat", "w"); for(int power = 0; power<2; ++power) { //int nRandNums = pow(10,power); int nRandNums = 8; fprintf(stdout,"__________________________________\n"); fprintf(stdout," nRand %d \n", nRandNums); //__________________________________ // allocate memory unsigned int size = nRandNums; unsigned int mem_size = sizeof(double) * size; double* rand_CPU = (double*)malloc(mem_size); double* rand_GPU_L = (double*)malloc(mem_size); double* rand_GPU_M = (double*)malloc(mem_size); double* rand_GPU_N = (double*)malloc(mem_size); time_t start; start = time(nullptr); //__________________________________ // Compute the random numbers randCPU( rand_CPU, nRandNums ); stopwatch(" randCPU: ", start); start = time(nullptr); randGPU_V1( rand_GPU_L, nRandNums); stopwatch(" randGPU_V1: ", start); start = time(nullptr); randGPU_V2( rand_GPU_M, rand_GPU_N, nRandNums); stopwatch(" randGPU_V2: ", start); //__________________________________ // Output data fprintf( fp, " #CPU, GPU_V1, GPU_dblExc, GPU_dblInc\n"); for (int i = 0; i< nRandNums; i++){ fprintf( fp, "%i:%i, %16.15E, %16.15E, %16.15E, %16.15E\n",power,i, rand_CPU[i], rand_GPU_L[i], rand_GPU_M[i], rand_GPU_N[i] ); //printf( "%i, %16.15E, %16.15E, %16.15E, %16.15E\n",i, rand_CPU[i], rand_GPU_L[i], rand_GPU_M[i], rand_GPU_N[i] ); } //__________________________________ //Free memory free( rand_CPU ); free( rand_GPU_L ); free( rand_GPU_M ); free( rand_GPU_N ); } // loop fclose(fp); }
03e66c00297633aa378e7fb7e8f76027956814b4.cu
/* * The MIT License * * Copyright (c) 1997-2017 The University of Utah * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include <random> #include <cuda.h> #include <cuda_runtime.h> #include <curand.h> #include <curand_kernel.h> #include <MersenneTwister.h> #define BLKWIDTH 32 //______________________________________________________________________ // // // The following compares the random number generation on the CPU vs GPU // // //______________________________________________________________________ //______________________________________________________________________ // inline int RoundUp(double d) { if(d>=0){ if((d-(int)d) == 0){ return (int)d; } else{ return (int)(d+1); } } else { return (int)d; } } //______________________________________________________________________ // void stopwatch( std::string message, time_t start) { double secs; time_t stop; /* timing variables */ stop = time(nullptr); secs = difftime(stop, start); fprintf(stdout," %.f [s] %s \n",secs, message.c_str()); } //______________________________________________________________________ // CPU based random number generations void randCPU( double *M, int nRandNums) { unsigned int size = nRandNums; unsigned int Imem_size = sizeof(unsigned int) * size; unsigned int Dmem_size = sizeof(double) * size; int* org_randInt = (int*)malloc(Imem_size); int* new_randInt = (int*)malloc(Imem_size); double* org_randDbl = (double*)malloc(Dmem_size); double* new_randDbl = (double*)malloc(Dmem_size); //__________________________________ // Orginal implementation MTRand mTwister; for (int i = 0; i< nRandNums; i++){ mTwister.seed(i); org_randDbl[i] = mTwister.rand(); org_randInt[i] = mTwister.randInt(); } //__________________________________ // C++11 std::mt19937 mTwist; std::uniform_real_distribution<double> D_dist(0.0,1.0); std::uniform_int_distribution<int> I_dist; // mTwist.seed(1234ULL); for (int i = 0; i< nRandNums; i++){ new_randDbl[i] = D_dist( mTwist ); new_randInt[i] = I_dist( mTwist ); } for (int i = 0; i< nRandNums; i++){ M[i] = new_randDbl[i]; } for (int i = 0; i< nRandNums; i++){ printf( "%i org_randDbl: %g new_randDbl: %g org_randInt: %i, new_randInt: %i\n",i, org_randDbl[i], new_randDbl[i], org_randInt[i], new_randInt[i]); } free( org_randInt ); free( new_randInt ); free( org_randDbl ); free( new_randDbl ); } //______________________________________________________________________ // Determine device properties void deviceProperties( 
int &maxThreadsPerBlock ) { // Number of CUDA devices int devCount; cudaGetDeviceCount(&devCount); // Iterate through devices for (int deviceNum = 0; deviceNum < devCount; ++deviceNum){ cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, deviceNum); // printDevProp(deviceProp); maxThreadsPerBlock = deviceProp.maxThreadsPerBlock; } } //______________________________________________________________________ // This is the host side random number generation using cuda void randGPU_V1( double *M, int nRandNums) { int size = nRandNums* sizeof(double); double* Md; //__________________________________ // allocate device memory and copy memory to the device cudaMalloc( (void**)&Md, size); cudaMemcpy( Md, M, size, cudaMemcpyHostToDevice ); //__________________________________ // Create pseudo-random number generator // set the seed // generate the numbers curandGenerator_t randGen; // curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_DEFAULT); curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_MT19937); curandSetPseudoRandomGeneratorSeed(randGen, 1234ULL); curandGenerateUniformDouble(randGen, Md, nRandNums); //__________________________________ // copy from device memory and free device matrices cudaMemcpy( M, Md, size, cudaMemcpyDeviceToHost ); cudaFree( Md ); curandDestroyGenerator(randGen); } //______________________________________________________________________ // Returns an random number __device__ double randDevice(curandState* globalState, const int tid) { curandState localState = globalState[tid]; double val = curand(&localState); globalState[tid] = localState; return (double)val * (1.0/4294967295.0); } //______________________________________________________________________ // Returns an random number excluding 0 & 1.0. See MersenneTwister.h // __device__ double randDblExcDevice(curandState* globalState, const int tid) { curandState localState = globalState[tid]; double val = curand(&localState); globalState[tid] = localState; return ( double(val) + 0.5 ) * (1.0/4294967296.0); } //______________________________________________________________________ // __global__ void setup_kernel(curandState* randNumStates) { int tID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; /* Each thread gets same seed, a different sequence number, no offset */ curand_init(1234, tID, 0, &randNumStates[tID]); } //______________________________________________________________________ // Kernel: __global__ void randNumKernel( curandState* randNumStates, double* M, double* N, int nRandNums ) { int tID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; // for (int k = 0; k < nRandNums; ++k){ M[tID] = randDblExcDevice( randNumStates, tID); N[tID] = randDevice( randNumStates, tID ); // } } //______________________________________________________________________ // Device side random number generator void randGPU_V2( double *M, double *N,int nRandNums) { int size = nRandNums* sizeof(double); double* Md; double* Nd; //__________________________________ // allocate device memory and copy memory to the device cudaMalloc( (void**)&Md, size); cudaMalloc( (void**)&Nd, size); //__________________________________ // copy host memory -> device cudaMemcpy( Md, M, size, cudaMemcpyHostToDevice ); cudaMemcpy( Nd, N, size, cudaMemcpyHostToDevice ); //__________________________________ // int maxThreadsPerBlock = 0; deviceProperties( maxThreadsPerBlock ); int xMaxThreadsPerBlock = BLKWIDTH; int yMaxThreadsPerBlock = BLKWIDTH; maxThreadsPerBlock = 
xMaxThreadsPerBlock * yMaxThreadsPerBlock; // hardwired for now int threadsPerBlock = min(maxThreadsPerBlock, nRandNums); int xBlocks = 0; int yBlocks = 0; if( nRandNums > maxThreadsPerBlock){ int nBlocks = RoundUp( nRandNums/sqrt(maxThreadsPerBlock) ); xBlocks = RoundUp( nRandNums/xMaxThreadsPerBlock ); yBlocks = RoundUp( nRandNums/yMaxThreadsPerBlock ); }else{ xBlocks = 1; // if matrix is smaller than 1 block yBlocks = 1; } int nBlocks = xBlocks = yBlocks; // Assumption that int me = xBlocks * yBlocks * threadsPerBlock; fprintf(stdout, " xBlocks: %d, yBlocks: %d, nRandNums: %d BLKWIDTH: %d, threadsPerBlock %d ",xBlocks, yBlocks, nRandNums, BLKWIDTH, threadsPerBlock); fprintf(stdout, " number of threads: %d\n",me); //__________________________________ // Kernel invocation dim3 dimBlock(BLKWIDTH, BLKWIDTH, 1); dim3 dimGrid( xBlocks, yBlocks, 1); // setup random number generator states on the device, 1 for each thread curandState* randNumStates; int numStates = dimGrid.x * dimGrid.y * dimBlock.x * dimBlock.y * dimBlock.z; cudaMalloc((void**)&randNumStates, numStates * sizeof(curandState)); //__________________________________ // Global Memory Kernel time_t start = time(nullptr); setup_kernel<<<dimGrid, dimBlock>>>( randNumStates ); stopwatch(" randDeviceGPU setup_kernel: ", start); start = time(nullptr); randNumKernel<<<dimGrid, dimBlock>>>( randNumStates, Md, Nd, nRandNums ); stopwatch(" randDeviceGPU randNumKernel: ", start); //__________________________________ // copy from device memory and free memory start = time(nullptr); cudaMemcpy( M, Md, size, cudaMemcpyDeviceToHost ); cudaMemcpy( N, Nd, size, cudaMemcpyDeviceToHost ); stopwatch(" randDeviceGPU memcopy: ", start); start = time(nullptr); cudaFree( Md ); cudaFree( Nd ); cudaFree(randNumStates) ; stopwatch(" randDeviceGPU free memory: ", start); } //______________________________________________________________________ int main( int argc, char** argv) { FILE *fp; fp = fopen("randomNumbers.dat", "w"); for(int power = 0; power<2; ++power) { //int nRandNums = pow(10,power); int nRandNums = 8; fprintf(stdout,"__________________________________\n"); fprintf(stdout," nRand %d \n", nRandNums); //__________________________________ // allocate memory unsigned int size = nRandNums; unsigned int mem_size = sizeof(double) * size; double* rand_CPU = (double*)malloc(mem_size); double* rand_GPU_L = (double*)malloc(mem_size); double* rand_GPU_M = (double*)malloc(mem_size); double* rand_GPU_N = (double*)malloc(mem_size); time_t start; start = time(nullptr); //__________________________________ // Compute the random numbers randCPU( rand_CPU, nRandNums ); stopwatch(" randCPU: ", start); start = time(nullptr); randGPU_V1( rand_GPU_L, nRandNums); stopwatch(" randGPU_V1: ", start); start = time(nullptr); randGPU_V2( rand_GPU_M, rand_GPU_N, nRandNums); stopwatch(" randGPU_V2: ", start); //__________________________________ // Output data fprintf( fp, " #CPU, GPU_V1, GPU_dblExc, GPU_dblInc\n"); for (int i = 0; i< nRandNums; i++){ fprintf( fp, "%i:%i, %16.15E, %16.15E, %16.15E, %16.15E\n",power,i, rand_CPU[i], rand_GPU_L[i], rand_GPU_M[i], rand_GPU_N[i] ); //printf( "%i, %16.15E, %16.15E, %16.15E, %16.15E\n",i, rand_CPU[i], rand_GPU_L[i], rand_GPU_M[i], rand_GPU_N[i] ); } //__________________________________ //Free memory free( rand_CPU ); free( rand_GPU_L ); free( rand_GPU_M ); free( rand_GPU_N ); } // loop fclose(fp); }
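The stopwatch in this program uses time()/difftime(), which only resolves whole seconds; the commented-out block at the end of the file already hints at cudaEvent-based timing. A minimal sketch of that event timing (dummy_kernel and the launch shape are placeholders, not names from the program):

// Sketch of millisecond-resolution kernel timing with CUDA events.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel() {}

int main() {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start, 0);
  dummy_kernel<<<1, 1>>>();
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);             // wait until the stop event has completed

  float ms = 0.f;
  cudaEventElapsedTime(&ms, start, stop); // elapsed time in milliseconds
  std::printf("kernel time: %f ms\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return 0;
}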
ff6507f40b71b352bc53ff68e58e41dd019d3cce.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void cube(float* d_out, float* d_in)
{
    int idx = threadIdx.x;
    float f = d_in[idx];
    d_out[idx] = f * f * f;
}
ff6507f40b71b352bc53ff68e58e41dd019d3cce.cu
#include "includes.h" __global__ void cube(float* d_out, float* d_in) { int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f * f * f; }
b330f40e34af4961c38cfb044c7119052f5893ca.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scan.h"

__global__ void scan_v1_kernel(float *d_output, float *d_input, int length)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // Guard against the last, partially filled block: the grid is rounded up
    // to a multiple of BLOCK_DIM, so threads with idx >= length must not write.
    if (idx >= length)
        return;

    float element = 0.f;

    // Naive inclusive scan: each thread sums every input element at or before its index.
    for (int offset = 0; offset < length; offset++)
    {
        if (idx - offset >= 0)
            element += d_input[idx - offset];
    }

    d_output[idx] = element;
}

void scan_v1(float *d_output, float *d_input, int length)
{
    dim3 dimBlock(BLOCK_DIM);
    dim3 dimGrid((length + BLOCK_DIM - 1) / BLOCK_DIM);
    hipLaunchKernelGGL(( scan_v1_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_output, d_input, length);
}
b330f40e34af4961c38cfb044c7119052f5893ca.cu
#include "scan.h" __global__ void scan_v1_kernel(float *d_output, float *d_input, int length) { int idx = blockDim.x * blockIdx.x + threadIdx.x; float element = 0.f; for (int offset = 0; offset < length; offset++) { if (idx - offset >= 0) element += d_input[idx - offset]; } d_output[idx] = element; } void scan_v1(float *d_output, float *d_input, int length) { dim3 dimBlock(BLOCK_DIM); dim3 dimGrid((length + BLOCK_DIM - 1) / BLOCK_DIM); scan_v1_kernel<<<dimGrid, dimBlock>>>(d_output, d_input, length); }
ccd0461ee3d9348bc95d7b4281f0ab24c0ef6eaa.hip
// !!! This is a file automatically generated by hipify!!! #include <windows.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> #include <device_launch_parameters.h> #include <math_constants.h> #include "kernel.h" #include "constants.h" #define CAMERALIGHT 0 #define CIRCLETINT 0 #define LIGHTFRONT 1 #define LIGHTABOVE 0 #define LIGHTBELOW 0 #define LIGHTBEHIND 0 #define COLOR 1 __device__ float EpsilonRaymarch = 0; __device__ unsigned int MaxRaymarchSteps = 0; __device__ unsigned int FractalIterations = 0; __device__ bool PrimaryRays = false; __device__ unsigned int PrimarySize = 0; __device__ unsigned int iteration = 0; void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); } } __device__ float DE(const glm::vec3& pos) { //return DEMandelbulb2(pos); return DEMandelbulb1(pos); //return DETetredon(pos); //return DESphere1(pos); //return glm::min(DEMandelbulb2(pos) + DETetredon(pos)); } /* * Distance Estiamtor for a Sphere in origo with radius 3 */ __device__ float DESphere1(const glm::vec3& pos) { float distance = length(pos); if (distance > 3) { distance = distance - 3; } else { distance = 0; } return distance; } /* * Distance estimator for a Tetredon. */ __device__ float DETetredon(const glm::vec3& pos) { glm::vec3 z(pos); float Scale = 2.f; glm::vec3 a1(1, 1, 1); glm::vec3 a2(-1, -1, 1); glm::vec3 a3(1, -1, -1); glm::vec3 a4(-1, 1, -1); glm::vec3 c; int n = 0; float dist, d; while (n < FractalIterations) { c = a1; dist = length(z - a1); d = length(z - a2); if (d < dist) { c = a2; dist = d; } d = length(z - a3); if (d < dist) { c = a3; dist = d; } d = length(z - a4); if (d < dist) { c = a4; dist = d; } z = Scale*z - c*(Scale - 1.0f); n++; } return length(z) * pow(Scale, float(-n)); } /* * Distance estimator for a Mandelbulb. Version 1 */ __device__ float DEMandelbulb1(const glm::vec3& p) { glm::vec3 z = p; float r = 0.0f; float dr = 1.0f; float power = 8.f; for (int i = 0; i < FractalIterations; ++i) { r = length(z); if (r > 8.f) break; // To polar coordianates float theta = acosf(z.y / r); float phi = atan2f(z.x, z.z); float r7 = glm::pow(r, power - 1.f); // Derivative dr = r7 * power * dr + 1.0f; // "Squaring" the length float zr = r7*r; // "Double" the angle theta = theta*power; phi = phi * power; // From polar coordianates z = p + zr*glm::vec3(sinf(phi)*sinf(theta), cosf(theta), sinf(theta) * cosf(phi)); } return 0.5f*logf(r)*r / dr; } /* * Distance estimator for a Mandelbulb. Version 2. Does not show egde cases when we are straight above or below the fractal. 
*/ __device__ float DEMandelbulb2(const glm::vec3& pos) { glm::vec3 zz = pos; float m = dot(zz); float dz = 1.0f; for (int i = 0; i < FractalIterations; ++i) { float m2 = m*m; float m4 = m2*m2; dz = 8.0f*sqrtf(m4*m2*m)*dz + 1.0f; float x = zz.x; float x2 = zz.x*zz.x; float x4 = x2*x2; float y = zz.y; float y2 = zz.y* zz.y; float y4 = y2*y2; float z = zz.z; float z2 = zz.z*zz.z; float z4 = z2*z2; float k3 = x2 + z2; float k2 = 1.f / sqrtf(k3*k3*k3*k3*k3*k3*k3); float k1 = x4 + y4 + z4 - 6.0f*y2*z2 - 6.0f*x2*y2 + 2.0f*z2*x2; float k4 = x2 - y2 + z2; zz.x = pos.x + 64.0f*x*y*z*(x2 - z2)*k4*(x4 - 6.0f*x2*z2 + z4)*k1*k2; zz.y = pos.y + -16.0f*y2*k3*k4*k4 + k1*k1; zz.z = pos.z + -8.0f*y*k4*(x4*x4 - 28.0f*x4*x2*z2 + 70.0f*x4*z4 - 28.0f*x2*z2*z4 + z4*z4)*k1*k2; m = dot(zz); if (m > 1000.0f) break; } return 0.25f*logf(m)*sqrtf(m) / dz; } __device__ bool BoundingSphere(const glm::vec3& dir, const glm::vec3& pos) { float rSquared = 1.2f * 1.2f; return true; if (dot(pos) <= rSquared) { return true; } else if (dot(pos, dir) <= 0.0f) { glm::vec3 v = pos - dir * dot(pos, dir); if (dot(v) <= rSquared) { return true; } } return false; } __device__ glm::vec3 PlaneFloor(const glm::vec3& dir, const glm::vec3& pos) { float denom = dot(glm::vec3(0, 1, 0), dir); if (denom > 0.0001f) { // Only visible from above float t = dot(glm::vec3(0, 1.5f, 0) - pos, (glm::vec3(0, 1, 0))) / denom; if (t >= 0.0f) { glm::vec3 collision = pos + t * dir; if (((int)floorf(collision.x) % 2 == 0 || (int)floorf(collision.z) % 2 == 0) && !((int)floorf(collision.x) % 2 == 0 && (int)floorf(collision.z) % 2 == 0)) { return glm::vec3(100.f, 100.f, 100.f); } else { return glm::vec3(50.f, 50.f, 50.f); } /* float distanceFromOrigo = length(pos + t * dir); if (distanceFromOrigo > 3 && distanceFromOrigo < 7) { return true; } */ } } return glm::vec3(0, 0, 0); } __device__ void color(uchar4* pixels, bool hit, unsigned int steps, glm::vec3 rayDir, glm::vec3 rayOrigin, glm::vec3 position, int index) { // Draw color to pixels if (hit) { float normalEpsilon = 0.000005f; // TODO find a good epsilon for normal glm::vec3 normal(DE(position + glm::vec3(normalEpsilon, 0, 0)) - DE(position - glm::vec3(normalEpsilon, 0, 0)), DE(position + glm::vec3(0, normalEpsilon, 0)) - DE(position - glm::vec3(0, normalEpsilon, 0)), DE(position + glm::vec3(0, 0, normalEpsilon)) - DE(position - glm::vec3(0, 0, normalEpsilon))); normal = normalize(normal); glm::vec3 lightPower(0.f, 0.f, 0.f); #if COLOR // Global illumination lightPower += glm::abs(glm::vec3(position.x, position.y, position.z)) / 2.0f; #else lightPower += glm::vec3(0.1f, 0.1f, 0.1f); #endif #if LIGHTFRONT // Light side lightPower += light(glm::vec3(2.0f, 0.f, -2.f), glm::vec3(1.f, 1.f, 1.f) * 24.f, position, normal, true); #endif #if LIGHTBELOW // Light below lightPower += light(glm::vec3(0.f, 2.f, 0.5f), glm::vec3(1.f, 1.f, 1.f)* 5.f, position, normal, true); #endif #if LIGHTABOVE // Light above lightPower += light(glm::vec3(0.f, -2.f, 0.5f), glm::vec3(1.f, 1.f, 1.f)* 5.f, position, normal, true); #endif #if LIGHTBEHIND // Light behind lightPower += light(glm::vec3(-2.0f, 0.f, -2.0f), glm::vec3(1.f, 1.f, 1.f) * 24.f, position, normal, true); #endif #if CIRCLETINT float tintFactor = 5.f; if (iteration % 1000 < 600) { lightPower += light(glm::vec3(3.0f, 1.f, 0.f), glm::vec3(1.f, 0.f, 0.f) *tintFactor, position, normal, true); lightPower += light(glm::vec3(0.0f, 1.f, 3.f), glm::vec3(1.f, 0.f, 0.f) * tintFactor, position, normal, true); lightPower += light(glm::vec3(0.0f, 1.f, -3.f), glm::vec3(1.f, 0.f, 0.f) * 
tintFactor, position, normal, true); lightPower += light(glm::vec3(-3.0f, 1.f, 0.f), glm::vec3(1.f, 0.f, 0.f) * tintFactor, position, normal, true); } else { lightPower += light(glm::vec3(3.0f, 1.f, 0.f), glm::vec3(0.f, 1.f, 0.f)* tintFactor, position, normal, true); lightPower += light(glm::vec3(0.0f, 1.f, 3.f), glm::vec3(0.f, 1.f, 0.f)* tintFactor, position, normal, true); lightPower += light(glm::vec3(0.0f, 1.f, -3.f), glm::vec3(0.f, 1.f, 0.f)* tintFactor, position, normal, true); lightPower += light(glm::vec3(-3.0f, 1.f, 0.f), glm::vec3(0.f, 1.f, 0.f)* tintFactor, position, normal, true); } #endif #if CAMERALIGHT // Use camera as a light lightPower += light(rayOrigin, glm::vec3(1.f, 1.f, 1.f) * 5.f, position, normal, false); #endif // Clamping lightPower = glm::min(glm::vec3(1.f, 1.f, 1.f), lightPower); lightPower = glm::max(glm::vec3(0.f, 0.f, 0.f), lightPower); pixels[index].w = 0; pixels[index].x = lightPower.x * 255.f; pixels[index].y = lightPower.y * 255.f; pixels[index].z = lightPower.z * 255.f; // Raymarch step coloring /* float color = MaxRaymarchSteps - steps; float maxColor = MaxRaymarchSteps; pixels[index].w = 0; pixels[index].x = (int)(color*255.f / maxColor) & 0xff; pixels[index].y = (int)(color*255.f / maxColor) & 0xff; pixels[index].z = (int)(color*255.f / maxColor) & 0xff; //*/ } else { glm::vec3 col = PlaneFloor(rayDir, rayOrigin); pixels[index].w = 0; pixels[index].x = (int)col.x & 0xff; pixels[index].y = (int)col.y & 0xff; pixels[index].z = (int)col.z & 0xff; /* if (PlaneFloor(rayDir, rayOrigin)) { if (iteration % 1000 < 600) { pixels[index].w = 0; pixels[index].x = 100 & 0xff; pixels[index].y = 0 & 0xff; pixels[index].z = 0 & 0xff; } else { pixels[index].w = 0; pixels[index].x = 0 & 0xff; pixels[index].y = 100 & 0xff; pixels[index].z = 0 & 0xff; } } else { pixels[index].w = 0; pixels[index].x = 0; pixels[index].y = 0; pixels[index].z = 0; } */ } } __device__ glm::vec3 light(const glm::vec3& lightPos, const glm::vec3& lightColor, const glm::vec3& position, const glm::vec3& normal, bool calcShadow) { if (calcShadow && (shadow(lightPos, position))) { return glm::vec3(0.f, 0.f, 0.f); } glm::vec3 radius(lightPos - position); return glm::vec3(lightColor * fmaxf(dot(normal, normalize(radius)), 0) / (4 * CUDART_PI_F * length(radius)*length(radius))); } __device__ bool shadow(const glm::vec3& lightPos, const glm::vec3& position) { float de = 0.0f; float d = de; glm::vec3 pos(lightPos); glm::vec3 direction(position - lightPos); glm::vec3 dir = normalize(direction); bool hit = false; for (int i = 0; i < MaxRaymarchSteps; ++i) { de = DE(pos); d += de; pos += de * dir; if (de <= EpsilonRaymarch) { hit = true; break; } } if (!hit) { return false; } return (length(direction) - 2 * EpsilonRaymarch > d); } extern "C" void launchKernel(uchar4* pixels, unsigned int width, unsigned int height, float focalLength, glm::mat3 rot, glm::vec3 pos, LOD l) { // setUp: // float epsilon, // int fractalIterations, // int raymarchsteps, // int priSize setUp << <1, 1 >> >(l); unsigned int primRays = l.primRays; hipDeviceSynchronize(); // blockHTreads fps // 16 22 // 256 33 // 512 36 // 1024 18 // Test above run with 1280 x 720 int blockThreads = 512; int totalThreads = height * width; int totalBlocks = totalThreads % blockThreads == 0 ? totalThreads / blockThreads : totalThreads / blockThreads + 1; if (primRays > 1) { // Allocate raymarchSteps and raymarchDistance unsigned char* raymarchSteps; float * raymarchDistance; unsigned int primaryWidth = width % primRays == 0 ? 
width / primRays : width / primRays + 1; unsigned int primaryHeight = height % primRays == 0 ? height / primRays : height / primRays + 1; unsigned int primarySize = primaryWidth * primaryHeight; hipMalloc((void**)&raymarchSteps, sizeof(unsigned char) * primarySize); // Do only once? hipMalloc((void**)&raymarchDistance, sizeof(float) * primarySize); // Do only once? int blockThreadsPrimary = 256; int totalThreadsPrimary = primarySize; int totalBlocksPrimary = totalThreadsPrimary % blockThreadsPrimary == 0 ? totalThreadsPrimary / blockThreadsPrimary : totalThreadsPrimary / blockThreadsPrimary + 1; primaryRay << <totalBlocksPrimary, blockThreadsPrimary >> >(raymarchSteps, raymarchDistance, width, height, focalLength, primaryWidth, primaryHeight, rot, pos); hipDeviceSynchronize(); // Make sure all primary rays are done secondaryRay << <totalBlocks, blockThreads >> >(pixels, raymarchSteps, raymarchDistance, width, height, focalLength, primaryWidth, primaryHeight, rot, pos); hipDeviceSynchronize(); // Synchronize secondary rays hipFree(raymarchSteps); // Do only once? hipFree(raymarchDistance); // Do only once? } else { singleRay << <totalBlocks, blockThreads >> >(pixels, width, height, focalLength, rot, pos); hipDeviceSynchronize(); // Synchronize rays } } __global__ void primaryRay(unsigned char* raymarchSteps, float* raymarchDistance, unsigned int width, unsigned int height, float focalLength, unsigned int primaryWidth, unsigned int primaryHeight, glm::mat3 rotation, glm::vec3 position) { // Calculate pixel index, x, y const unsigned int index = blockIdx.x * blockDim.x + (threadIdx.x); if (index >= primaryHeight*primaryWidth) { return; } int squareRadius = PrimarySize / 2; const unsigned int x = squareRadius + PrimarySize * (index % primaryWidth); const unsigned int y = squareRadius + PrimarySize * (index / primaryWidth); glm::vec3 direction(x - (width / 2.f), y - (height / 2.f), focalLength); direction = rotation*direction; direction = normalize(direction); glm::vec3 secondDir(x + squareRadius - (width / 2.f), y + squareRadius - (height / 2.f), height / 2.f); secondDir = rotation*secondDir; secondDir = normalize(secondDir); glm::vec3 origin(position); float distance = 0; int steps = 0; // Check bounding sphere if (BoundingSphere(direction, position)) { // Raymarch as long as all neighbouring rays fit //// Only check the corner ray Chapter 4 drive report float de = 0.0f; // Maybe create an Estimate or calculation of first circle float d = de; position += de * direction; for (int i = 0; i < MaxRaymarchSteps; ++i) { de = DE(position); d += de; position += de * direction; // Check if all rays are inside here // TODO fix check if (length(cross(secondDir, position - origin)) > de) { de = 0.0f; } if (de <= EpsilonRaymarch) { distance = d; steps = i; break; } } } // Save result raymarchSteps[index] = steps; raymarchDistance[index] = distance; } __global__ void secondaryRay(uchar4* pixels, unsigned char* raymarchSteps, float* raymarchDistance, unsigned int width, unsigned int height, float focalLength, unsigned int primaryWidth, unsigned int primaryHeight, glm::mat3 rotation, glm::vec3 position) { // Calculate pixel index, x, y const unsigned int index = blockIdx.x * blockDim.x + (threadIdx.x); if (index >= width * height) { return; } int secondarySteps = 0; const unsigned int x = index % width; const unsigned int y = index / width; const unsigned int primaryIndex = x / PrimarySize + (y / PrimarySize) * primaryWidth; // Calculate start position from primary ray glm::vec3 direction(x - (width / 2.f), 
y - (height / 2.f), focalLength); direction = rotation*direction; direction = normalize(direction); int steps = raymarchSteps[primaryIndex]; float distance = raymarchDistance[primaryIndex]; glm::vec3 pos = position + direction * distance; bool hit = false; if (steps != 0) { // Raymarch until eps for (int i = steps; i < MaxRaymarchSteps; ++i) { secondarySteps++; float de = DE(pos); distance += de; pos += de * direction; if (de <= EpsilonRaymarch) { hit = true; steps = i; break; } } } color(pixels, hit, steps, direction, position, pos, index); } __global__ void singleRay(uchar4* pixels, unsigned int width, unsigned int height, float focalLength, glm::mat3 rotation, glm::vec3 position) { // Calculate pixel index, x, y const unsigned int index = blockIdx.x * blockDim.x + (threadIdx.x); if (index >= width * height) { return; } const unsigned int x = index % width; const unsigned int y = index / width; // Calculate start position from primary ray glm::vec3 direction(x - (width / 2.f), y - (height / 2.f), focalLength); direction = rotation*direction; direction = normalize(direction); glm::vec3 pos = position; bool hit = false; unsigned int steps = 0; float distance = 0; if (BoundingSphere(direction, position)) { // Raymarch until eps for (int i = steps; i < MaxRaymarchSteps; ++i) { float de = DE(pos); distance += de; pos += de * direction; if (de <= EpsilonRaymarch) { hit = true; steps = i; break; } } } else { // Show bounding sphere /* pixels[index].w = 0; pixels[index].x = 255; pixels[index].y = 255; pixels[index].z = 255; return; //*/ } color(pixels, hit, steps, direction, position, pos, index); } __global__ void setUp(LOD l) {//(float epsilon, unsigned int fractalIterations, unsigned int raymarchsteps, unsigned int priSize) { EpsilonRaymarch = l.epsilon; FractalIterations = l.fractalIterations; MaxRaymarchSteps = l.raymarchsteps; if (l.primRays == 1) { PrimarySize = 1; PrimaryRays = false; } else { PrimarySize = l.primRays; PrimaryRays = true; } iteration++; } /* CUDA time event float time; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); setUp<< <1, 1>> >(.0005f, 10, 60); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("Time %f", time); */ /* Spectrum background pixel.w = 0; pixel.x = (256 * x / (width)) & 0xff; pixel.y = (256 * y / (height)) & 0xff; pixel.z = 10; */
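All three kernels in this renderer (primaryRay, secondaryRay, singleRay) share the same core loop: evaluate the distance estimator, advance the ray by that distance, and stop once the step falls below EpsilonRaymarch. A stripped-down sketch of that sphere-tracing loop with a plain sphere DE (the names, constants, and the sphere estimator are placeholders, not taken from kernel.h or the Mandelbulb estimators above):

// Minimal sphere-tracing sketch of the DE -> step -> epsilon loop.
#include <cstdio>
#include <math.h>
#include <cuda_runtime.h>

#define MAX_STEPS 128
#define EPSILON   1e-4f

__host__ __device__ inline float3 addf3(float3 a, float3 b) { return make_float3(a.x + b.x, a.y + b.y, a.z + b.z); }
__host__ __device__ inline float3 mulf3(float3 a, float s)  { return make_float3(a.x * s, a.y * s, a.z * s); }
__host__ __device__ inline float  lenf3(float3 a)           { return sqrtf(a.x * a.x + a.y * a.y + a.z * a.z); }

// Placeholder distance estimator: unit sphere at the origin.
__host__ __device__ inline float DE_sphere(float3 p) { return lenf3(p) - 1.0f; }

// Step forward by the estimated distance until it drops below the epsilon.
__host__ __device__ bool sphereTrace(float3 origin, float3 dir, float* outDist) {
  float travelled = 0.0f;
  float3 pos = origin;
  for (int i = 0; i < MAX_STEPS; ++i) {
    float de = DE_sphere(pos);           // largest step guaranteed not to overshoot the surface
    travelled += de;
    pos = addf3(pos, mulf3(dir, de));
    if (de <= EPSILON) { *outDist = travelled; return true; }
  }
  return false;                          // ray escaped or ran out of steps
}

int main() {
  float dist = 0.f;
  bool hit = sphereTrace(make_float3(0.f, 0.f, -3.f), make_float3(0.f, 0.f, 1.f), &dist);
  std::printf("hit=%d distance=%f (expected ~2)\n", hit ? 1 : 0, dist);
  return 0;
}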
ccd0461ee3d9348bc95d7b4281f0ab24c0ef6eaa.cu
#include <windows.h> #include <stdio.h> #include <cuda_runtime.h> #include <cuda_gl_interop.h> #include <device_launch_parameters.h> #include <math_constants.h> #include "kernel.h" #include "constants.h" #define CAMERALIGHT 0 #define CIRCLETINT 0 #define LIGHTFRONT 1 #define LIGHTABOVE 0 #define LIGHTBELOW 0 #define LIGHTBEHIND 0 #define COLOR 1 __device__ float EpsilonRaymarch = 0; __device__ unsigned int MaxRaymarchSteps = 0; __device__ unsigned int FractalIterations = 0; __device__ bool PrimaryRays = false; __device__ unsigned int PrimarySize = 0; __device__ unsigned int iteration = 0; void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); } } __device__ float DE(const glm::vec3& pos) { //return DEMandelbulb2(pos); return DEMandelbulb1(pos); //return DETetredon(pos); //return DESphere1(pos); //return glm::min(DEMandelbulb2(pos) + DETetredon(pos)); } /* * Distance Estiamtor for a Sphere in origo with radius 3 */ __device__ float DESphere1(const glm::vec3& pos) { float distance = length(pos); if (distance > 3) { distance = distance - 3; } else { distance = 0; } return distance; } /* * Distance estimator for a Tetredon. */ __device__ float DETetredon(const glm::vec3& pos) { glm::vec3 z(pos); float Scale = 2.f; glm::vec3 a1(1, 1, 1); glm::vec3 a2(-1, -1, 1); glm::vec3 a3(1, -1, -1); glm::vec3 a4(-1, 1, -1); glm::vec3 c; int n = 0; float dist, d; while (n < FractalIterations) { c = a1; dist = length(z - a1); d = length(z - a2); if (d < dist) { c = a2; dist = d; } d = length(z - a3); if (d < dist) { c = a3; dist = d; } d = length(z - a4); if (d < dist) { c = a4; dist = d; } z = Scale*z - c*(Scale - 1.0f); n++; } return length(z) * pow(Scale, float(-n)); } /* * Distance estimator for a Mandelbulb. Version 1 */ __device__ float DEMandelbulb1(const glm::vec3& p) { glm::vec3 z = p; float r = 0.0f; float dr = 1.0f; float power = 8.f; for (int i = 0; i < FractalIterations; ++i) { r = length(z); if (r > 8.f) break; // To polar coordianates float theta = acosf(z.y / r); float phi = atan2f(z.x, z.z); float r7 = glm::pow(r, power - 1.f); // Derivative dr = r7 * power * dr + 1.0f; // "Squaring" the length float zr = r7*r; // "Double" the angle theta = theta*power; phi = phi * power; // From polar coordianates z = p + zr*glm::vec3(sinf(phi)*sinf(theta), cosf(theta), sinf(theta) * cosf(phi)); } return 0.5f*logf(r)*r / dr; } /* * Distance estimator for a Mandelbulb. Version 2. Does not show egde cases when we are straight above or below the fractal. 
*/ __device__ float DEMandelbulb2(const glm::vec3& pos) { glm::vec3 zz = pos; float m = dot(zz); float dz = 1.0f; for (int i = 0; i < FractalIterations; ++i) { float m2 = m*m; float m4 = m2*m2; dz = 8.0f*sqrtf(m4*m2*m)*dz + 1.0f; float x = zz.x; float x2 = zz.x*zz.x; float x4 = x2*x2; float y = zz.y; float y2 = zz.y* zz.y; float y4 = y2*y2; float z = zz.z; float z2 = zz.z*zz.z; float z4 = z2*z2; float k3 = x2 + z2; float k2 = 1.f / sqrtf(k3*k3*k3*k3*k3*k3*k3); float k1 = x4 + y4 + z4 - 6.0f*y2*z2 - 6.0f*x2*y2 + 2.0f*z2*x2; float k4 = x2 - y2 + z2; zz.x = pos.x + 64.0f*x*y*z*(x2 - z2)*k4*(x4 - 6.0f*x2*z2 + z4)*k1*k2; zz.y = pos.y + -16.0f*y2*k3*k4*k4 + k1*k1; zz.z = pos.z + -8.0f*y*k4*(x4*x4 - 28.0f*x4*x2*z2 + 70.0f*x4*z4 - 28.0f*x2*z2*z4 + z4*z4)*k1*k2; m = dot(zz); if (m > 1000.0f) break; } return 0.25f*logf(m)*sqrtf(m) / dz; } __device__ bool BoundingSphere(const glm::vec3& dir, const glm::vec3& pos) { float rSquared = 1.2f * 1.2f; return true; if (dot(pos) <= rSquared) { return true; } else if (dot(pos, dir) <= 0.0f) { glm::vec3 v = pos - dir * dot(pos, dir); if (dot(v) <= rSquared) { return true; } } return false; } __device__ glm::vec3 PlaneFloor(const glm::vec3& dir, const glm::vec3& pos) { float denom = dot(glm::vec3(0, 1, 0), dir); if (denom > 0.0001f) { // Only visible from above float t = dot(glm::vec3(0, 1.5f, 0) - pos, (glm::vec3(0, 1, 0))) / denom; if (t >= 0.0f) { glm::vec3 collision = pos + t * dir; if (((int)floorf(collision.x) % 2 == 0 || (int)floorf(collision.z) % 2 == 0) && !((int)floorf(collision.x) % 2 == 0 && (int)floorf(collision.z) % 2 == 0)) { return glm::vec3(100.f, 100.f, 100.f); } else { return glm::vec3(50.f, 50.f, 50.f); } /* float distanceFromOrigo = length(pos + t * dir); if (distanceFromOrigo > 3 && distanceFromOrigo < 7) { return true; } */ } } return glm::vec3(0, 0, 0); } __device__ void color(uchar4* pixels, bool hit, unsigned int steps, glm::vec3 rayDir, glm::vec3 rayOrigin, glm::vec3 position, int index) { // Draw color to pixels if (hit) { float normalEpsilon = 0.000005f; // TODO find a good epsilon for normal glm::vec3 normal(DE(position + glm::vec3(normalEpsilon, 0, 0)) - DE(position - glm::vec3(normalEpsilon, 0, 0)), DE(position + glm::vec3(0, normalEpsilon, 0)) - DE(position - glm::vec3(0, normalEpsilon, 0)), DE(position + glm::vec3(0, 0, normalEpsilon)) - DE(position - glm::vec3(0, 0, normalEpsilon))); normal = normalize(normal); glm::vec3 lightPower(0.f, 0.f, 0.f); #if COLOR // Global illumination lightPower += glm::abs(glm::vec3(position.x, position.y, position.z)) / 2.0f; #else lightPower += glm::vec3(0.1f, 0.1f, 0.1f); #endif #if LIGHTFRONT // Light side lightPower += light(glm::vec3(2.0f, 0.f, -2.f), glm::vec3(1.f, 1.f, 1.f) * 24.f, position, normal, true); #endif #if LIGHTBELOW // Light below lightPower += light(glm::vec3(0.f, 2.f, 0.5f), glm::vec3(1.f, 1.f, 1.f)* 5.f, position, normal, true); #endif #if LIGHTABOVE // Light above lightPower += light(glm::vec3(0.f, -2.f, 0.5f), glm::vec3(1.f, 1.f, 1.f)* 5.f, position, normal, true); #endif #if LIGHTBEHIND // Light behind lightPower += light(glm::vec3(-2.0f, 0.f, -2.0f), glm::vec3(1.f, 1.f, 1.f) * 24.f, position, normal, true); #endif #if CIRCLETINT float tintFactor = 5.f; if (iteration % 1000 < 600) { lightPower += light(glm::vec3(3.0f, 1.f, 0.f), glm::vec3(1.f, 0.f, 0.f) *tintFactor, position, normal, true); lightPower += light(glm::vec3(0.0f, 1.f, 3.f), glm::vec3(1.f, 0.f, 0.f) * tintFactor, position, normal, true); lightPower += light(glm::vec3(0.0f, 1.f, -3.f), glm::vec3(1.f, 0.f, 0.f) * 
tintFactor, position, normal, true); lightPower += light(glm::vec3(-3.0f, 1.f, 0.f), glm::vec3(1.f, 0.f, 0.f) * tintFactor, position, normal, true); } else { lightPower += light(glm::vec3(3.0f, 1.f, 0.f), glm::vec3(0.f, 1.f, 0.f)* tintFactor, position, normal, true); lightPower += light(glm::vec3(0.0f, 1.f, 3.f), glm::vec3(0.f, 1.f, 0.f)* tintFactor, position, normal, true); lightPower += light(glm::vec3(0.0f, 1.f, -3.f), glm::vec3(0.f, 1.f, 0.f)* tintFactor, position, normal, true); lightPower += light(glm::vec3(-3.0f, 1.f, 0.f), glm::vec3(0.f, 1.f, 0.f)* tintFactor, position, normal, true); } #endif #if CAMERALIGHT // Use camera as a light lightPower += light(rayOrigin, glm::vec3(1.f, 1.f, 1.f) * 5.f, position, normal, false); #endif // Clamping lightPower = glm::min(glm::vec3(1.f, 1.f, 1.f), lightPower); lightPower = glm::max(glm::vec3(0.f, 0.f, 0.f), lightPower); pixels[index].w = 0; pixels[index].x = lightPower.x * 255.f; pixels[index].y = lightPower.y * 255.f; pixels[index].z = lightPower.z * 255.f; // Raymarch step coloring /* float color = MaxRaymarchSteps - steps; float maxColor = MaxRaymarchSteps; pixels[index].w = 0; pixels[index].x = (int)(color*255.f / maxColor) & 0xff; pixels[index].y = (int)(color*255.f / maxColor) & 0xff; pixels[index].z = (int)(color*255.f / maxColor) & 0xff; //*/ } else { glm::vec3 col = PlaneFloor(rayDir, rayOrigin); pixels[index].w = 0; pixels[index].x = (int)col.x & 0xff; pixels[index].y = (int)col.y & 0xff; pixels[index].z = (int)col.z & 0xff; /* if (PlaneFloor(rayDir, rayOrigin)) { if (iteration % 1000 < 600) { pixels[index].w = 0; pixels[index].x = 100 & 0xff; pixels[index].y = 0 & 0xff; pixels[index].z = 0 & 0xff; } else { pixels[index].w = 0; pixels[index].x = 0 & 0xff; pixels[index].y = 100 & 0xff; pixels[index].z = 0 & 0xff; } } else { pixels[index].w = 0; pixels[index].x = 0; pixels[index].y = 0; pixels[index].z = 0; } */ } } __device__ glm::vec3 light(const glm::vec3& lightPos, const glm::vec3& lightColor, const glm::vec3& position, const glm::vec3& normal, bool calcShadow) { if (calcShadow && (shadow(lightPos, position))) { return glm::vec3(0.f, 0.f, 0.f); } glm::vec3 radius(lightPos - position); return glm::vec3(lightColor * fmaxf(dot(normal, normalize(radius)), 0) / (4 * CUDART_PI_F * length(radius)*length(radius))); } __device__ bool shadow(const glm::vec3& lightPos, const glm::vec3& position) { float de = 0.0f; float d = de; glm::vec3 pos(lightPos); glm::vec3 direction(position - lightPos); glm::vec3 dir = normalize(direction); bool hit = false; for (int i = 0; i < MaxRaymarchSteps; ++i) { de = DE(pos); d += de; pos += de * dir; if (de <= EpsilonRaymarch) { hit = true; break; } } if (!hit) { return false; } return (length(direction) - 2 * EpsilonRaymarch > d); } extern "C" void launchKernel(uchar4* pixels, unsigned int width, unsigned int height, float focalLength, glm::mat3 rot, glm::vec3 pos, LOD l) { // setUp: // float epsilon, // int fractalIterations, // int raymarchsteps, // int priSize setUp << <1, 1 >> >(l); unsigned int primRays = l.primRays; cudaThreadSynchronize(); // blockHTreads fps // 16 22 // 256 33 // 512 36 // 1024 18 // Test above run with 1280 x 720 int blockThreads = 512; int totalThreads = height * width; int totalBlocks = totalThreads % blockThreads == 0 ? totalThreads / blockThreads : totalThreads / blockThreads + 1; if (primRays > 1) { // Allocate raymarchSteps and raymarchDistance unsigned char* raymarchSteps; float * raymarchDistance; unsigned int primaryWidth = width % primRays == 0 ? 
width / primRays : width / primRays + 1; unsigned int primaryHeight = height % primRays == 0 ? height / primRays : height / primRays + 1; unsigned int primarySize = primaryWidth * primaryHeight; cudaMalloc((void**)&raymarchSteps, sizeof(unsigned char) * primarySize); // Do only once? cudaMalloc((void**)&raymarchDistance, sizeof(float) * primarySize); // Do only once? int blockThreadsPrimary = 256; int totalThreadsPrimary = primarySize; int totalBlocksPrimary = totalThreadsPrimary % blockThreadsPrimary == 0 ? totalThreadsPrimary / blockThreadsPrimary : totalThreadsPrimary / blockThreadsPrimary + 1; primaryRay << <totalBlocksPrimary, blockThreadsPrimary >> >(raymarchSteps, raymarchDistance, width, height, focalLength, primaryWidth, primaryHeight, rot, pos); cudaThreadSynchronize(); // Make sure all primary rays are done secondaryRay << <totalBlocks, blockThreads >> >(pixels, raymarchSteps, raymarchDistance, width, height, focalLength, primaryWidth, primaryHeight, rot, pos); cudaThreadSynchronize(); // Synchronize secondary rays cudaFree(raymarchSteps); // Do only once? cudaFree(raymarchDistance); // Do only once? } else { singleRay << <totalBlocks, blockThreads >> >(pixels, width, height, focalLength, rot, pos); cudaThreadSynchronize(); // Synchronize rays } } __global__ void primaryRay(unsigned char* raymarchSteps, float* raymarchDistance, unsigned int width, unsigned int height, float focalLength, unsigned int primaryWidth, unsigned int primaryHeight, glm::mat3 rotation, glm::vec3 position) { // Calculate pixel index, x, y const unsigned int index = blockIdx.x * blockDim.x + (threadIdx.x); if (index >= primaryHeight*primaryWidth) { return; } int squareRadius = PrimarySize / 2; const unsigned int x = squareRadius + PrimarySize * (index % primaryWidth); const unsigned int y = squareRadius + PrimarySize * (index / primaryWidth); glm::vec3 direction(x - (width / 2.f), y - (height / 2.f), focalLength); direction = rotation*direction; direction = normalize(direction); glm::vec3 secondDir(x + squareRadius - (width / 2.f), y + squareRadius - (height / 2.f), height / 2.f); secondDir = rotation*secondDir; secondDir = normalize(secondDir); glm::vec3 origin(position); float distance = 0; int steps = 0; // Check bounding sphere if (BoundingSphere(direction, position)) { // Raymarch as long as all neighbouring rays fit //// Only check the corner ray Chapter 4 drive report float de = 0.0f; // Maybe create an Estimate or calculation of first circle float d = de; position += de * direction; for (int i = 0; i < MaxRaymarchSteps; ++i) { de = DE(position); d += de; position += de * direction; // Check if all rays are inside here // TODO fix check if (length(cross(secondDir, position - origin)) > de) { de = 0.0f; } if (de <= EpsilonRaymarch) { distance = d; steps = i; break; } } } // Save result raymarchSteps[index] = steps; raymarchDistance[index] = distance; } __global__ void secondaryRay(uchar4* pixels, unsigned char* raymarchSteps, float* raymarchDistance, unsigned int width, unsigned int height, float focalLength, unsigned int primaryWidth, unsigned int primaryHeight, glm::mat3 rotation, glm::vec3 position) { // Calculate pixel index, x, y const unsigned int index = blockIdx.x * blockDim.x + (threadIdx.x); if (index >= width * height) { return; } int secondarySteps = 0; const unsigned int x = index % width; const unsigned int y = index / width; const unsigned int primaryIndex = x / PrimarySize + (y / PrimarySize) * primaryWidth; // Calculate start position from primary ray glm::vec3 direction(x - (width / 
2.f), y - (height / 2.f), focalLength); direction = rotation*direction; direction = normalize(direction); int steps = raymarchSteps[primaryIndex]; float distance = raymarchDistance[primaryIndex]; glm::vec3 pos = position + direction * distance; bool hit = false; if (steps != 0) { // Raymarch until eps for (int i = steps; i < MaxRaymarchSteps; ++i) { secondarySteps++; float de = DE(pos); distance += de; pos += de * direction; if (de <= EpsilonRaymarch) { hit = true; steps = i; break; } } } color(pixels, hit, steps, direction, position, pos, index); } __global__ void singleRay(uchar4* pixels, unsigned int width, unsigned int height, float focalLength, glm::mat3 rotation, glm::vec3 position) { // Calculate pixel index, x, y const unsigned int index = blockIdx.x * blockDim.x + (threadIdx.x); if (index >= width * height) { return; } const unsigned int x = index % width; const unsigned int y = index / width; // Calculate start position from primary ray glm::vec3 direction(x - (width / 2.f), y - (height / 2.f), focalLength); direction = rotation*direction; direction = normalize(direction); glm::vec3 pos = position; bool hit = false; unsigned int steps = 0; float distance = 0; if (BoundingSphere(direction, position)) { // Raymarch until eps for (int i = steps; i < MaxRaymarchSteps; ++i) { float de = DE(pos); distance += de; pos += de * direction; if (de <= EpsilonRaymarch) { hit = true; steps = i; break; } } } else { // Show bounding sphere /* pixels[index].w = 0; pixels[index].x = 255; pixels[index].y = 255; pixels[index].z = 255; return; //*/ } color(pixels, hit, steps, direction, position, pos, index); } __global__ void setUp(LOD l) {//(float epsilon, unsigned int fractalIterations, unsigned int raymarchsteps, unsigned int priSize) { EpsilonRaymarch = l.epsilon; FractalIterations = l.fractalIterations; MaxRaymarchSteps = l.raymarchsteps; if (l.primRays == 1) { PrimarySize = 1; PrimaryRays = false; } else { PrimarySize = l.primRays; PrimaryRays = true; } iteration++; } /* CUDA time event float time; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); setUp<< <1, 1>> >(.0005f, 10, 60); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("Time %f", time); */ /* Spectrum background pixel.w = 0; pixel.x = (256 * x / (width)) & 0xff; pixel.y = (256 * y / (height)) & 0xff; pixel.z = 10; */
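Every kernel in this renderer is built on the same sphere-tracing loop: step along the ray by the value of the distance estimator, which is a lower bound on the distance to the surface, and shade with a normal taken from central differences of the DE, as color() does above. Below is a minimal standalone sketch of that loop with a unit-sphere DE standing in for DEMandelbulb2; the helper names are hypothetical and the same GLM device setup used above is assumed.

// Illustrative sketch only: a unit-sphere distance estimator stands in for DEMandelbulb2.
__device__ float SphereDE(const glm::vec3& p) {
    return glm::length(p) - 1.0f;                 // signed distance to a unit sphere
}

// Central-difference normal of the DE, the same idea used in color() above.
__device__ glm::vec3 DENormal(const glm::vec3& p, float eps) {
    return glm::normalize(glm::vec3(
        SphereDE(p + glm::vec3(eps, 0, 0)) - SphereDE(p - glm::vec3(eps, 0, 0)),
        SphereDE(p + glm::vec3(0, eps, 0)) - SphereDE(p - glm::vec3(0, eps, 0)),
        SphereDE(p + glm::vec3(0, 0, eps)) - SphereDE(p - glm::vec3(0, 0, eps))));
}

// March from `origin` along the normalized `dir`; each step advances by the DE value,
// which never overshoots the surface, and stops once the step falls below epsilon.
__device__ bool SphereTrace(glm::vec3 origin, glm::vec3 dir,
                            int maxSteps, float epsilon, glm::vec3& hit) {
    glm::vec3 p = origin;
    for (int i = 0; i < maxSteps; ++i) {
        float de = SphereDE(p);
        p += de * dir;
        if (de <= epsilon) { hit = p; return true; }  // converged onto the surface
    }
    return false;                                     // escaped or ran out of steps
}

The epsilon versus step-count trade-off in this sketch is exactly what the LOD struct passed to setUp() controls for the real kernels.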
8a159da4a813128433f305e12857e2d0259bbe80.hip
// !!! This is a file automatically generated by hipify!!! #include "../../../src/common/compressed_iterator.h" #include "../../../src/common/device_helpers.cuh" #include "gtest/gtest.h" #include <algorithm> #include <thrust/device_vector.h> namespace xgboost { namespace common { struct WriteSymbolFunction { CompressedBufferWriter cbw; unsigned char* buffer_data_d; int* input_data_d; WriteSymbolFunction(CompressedBufferWriter cbw, unsigned char* buffer_data_d, int* input_data_d) : cbw(cbw), buffer_data_d(buffer_data_d), input_data_d(input_data_d) {} __device__ void operator()(size_t i) { cbw.AtomicWriteSymbol(buffer_data_d, input_data_d[i], i); } }; struct ReadSymbolFunction { CompressedIterator<int> ci; int* output_data_d; ReadSymbolFunction(CompressedIterator<int> ci, int* output_data_d) : ci(ci), output_data_d(output_data_d) {} __device__ void operator()(size_t i) { output_data_d[i] = ci[i]; } }; TEST(CompressedIterator, TestGPU) { dh::safe_cuda(hipSetDevice(0)); std::vector<int> test_cases = {1, 3, 426, 21, 64, 256, 100000, INT32_MAX}; int num_elements = 1000; int repetitions = 1000; srand(9); for (auto alphabet_size : test_cases) { for (int i = 0; i < repetitions; i++) { std::vector<int> input(num_elements); std::generate(input.begin(), input.end(), [=]() { return rand() % alphabet_size; }); CompressedBufferWriter cbw(alphabet_size); thrust::device_vector<int> input_d(input); thrust::device_vector<unsigned char> buffer_d( CompressedBufferWriter::CalculateBufferSize(input.size(), alphabet_size)); // write the data on device auto input_data_d = input_d.data().get(); auto buffer_data_d = buffer_d.data().get(); dh::LaunchN(input_d.size(), WriteSymbolFunction(cbw, buffer_data_d, input_data_d)); // read the data on device CompressedIterator<int> ci(buffer_d.data().get(), alphabet_size); thrust::device_vector<int> output_d(input.size()); auto output_data_d = output_d.data().get(); dh::LaunchN(output_d.size(), ReadSymbolFunction(ci, output_data_d)); std::vector<int> output(output_d.size()); thrust::copy(output_d.begin(), output_d.end(), output.begin()); ASSERT_TRUE(input == output); } } } } // namespace common } // namespace xgboost
8a159da4a813128433f305e12857e2d0259bbe80.cu
#include "../../../src/common/compressed_iterator.h" #include "../../../src/common/device_helpers.cuh" #include "gtest/gtest.h" #include <algorithm> #include <thrust/device_vector.h> namespace xgboost { namespace common { struct WriteSymbolFunction { CompressedBufferWriter cbw; unsigned char* buffer_data_d; int* input_data_d; WriteSymbolFunction(CompressedBufferWriter cbw, unsigned char* buffer_data_d, int* input_data_d) : cbw(cbw), buffer_data_d(buffer_data_d), input_data_d(input_data_d) {} __device__ void operator()(size_t i) { cbw.AtomicWriteSymbol(buffer_data_d, input_data_d[i], i); } }; struct ReadSymbolFunction { CompressedIterator<int> ci; int* output_data_d; ReadSymbolFunction(CompressedIterator<int> ci, int* output_data_d) : ci(ci), output_data_d(output_data_d) {} __device__ void operator()(size_t i) { output_data_d[i] = ci[i]; } }; TEST(CompressedIterator, TestGPU) { dh::safe_cuda(cudaSetDevice(0)); std::vector<int> test_cases = {1, 3, 426, 21, 64, 256, 100000, INT32_MAX}; int num_elements = 1000; int repetitions = 1000; srand(9); for (auto alphabet_size : test_cases) { for (int i = 0; i < repetitions; i++) { std::vector<int> input(num_elements); std::generate(input.begin(), input.end(), [=]() { return rand() % alphabet_size; }); CompressedBufferWriter cbw(alphabet_size); thrust::device_vector<int> input_d(input); thrust::device_vector<unsigned char> buffer_d( CompressedBufferWriter::CalculateBufferSize(input.size(), alphabet_size)); // write the data on device auto input_data_d = input_d.data().get(); auto buffer_data_d = buffer_d.data().get(); dh::LaunchN(input_d.size(), WriteSymbolFunction(cbw, buffer_data_d, input_data_d)); // read the data on device CompressedIterator<int> ci(buffer_d.data().get(), alphabet_size); thrust::device_vector<int> output_d(input.size()); auto output_data_d = output_d.data().get(); dh::LaunchN(output_d.size(), ReadSymbolFunction(ci, output_data_d)); std::vector<int> output(output_d.size()); thrust::copy(output_d.begin(), output_d.end(), output.begin()); ASSERT_TRUE(input == output); } } } } // namespace common } // namespace xgboost
fca024445239c486fc3f15f3e57c437faea628d4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

texture<float, 1, hipReadModeElementType> texRef;

__global__ void actiune_thread(float* a_d, float* b_d, float *r_d, int N);

// The kernel that runs on the CUDA device
__global__ void actiune_thread(float* a_d, float* b_d, float *r_d, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    //if(idx < N){
    r_d[idx] = - tex1Dfetch(texRef, idx);
    //}
}

extern "C" hipError_t launch_actiune_thread(float* a_d, float* b_d, float *r_d, int N, dim3 DIM_GRID, dim3 DIM_BLOCK)
{
    hipLaunchKernelGGL(( actiune_thread) , dim3(DIM_GRID), dim3(DIM_BLOCK), 0, 0, a_d, b_d, r_d, N);
    return hipGetLastError();
}

extern "C" hipError_t legare(size_t * offset, const void * devPtr, size_t size = UINT_MAX){
    hipBindTexture(offset, texRef, devPtr, UINT_MAX);
    return hipGetLastError();
}

extern "C" hipError_t release(){
    hipUnbindTexture(texRef);
    return hipGetLastError();
}
fca024445239c486fc3f15f3e57c437faea628d4.cu
texture<float, 1, cudaReadModeElementType> texRef;

__global__ void actiune_thread(float* a_d, float* b_d, float *r_d, int N);

// The kernel that runs on the CUDA device
__global__ void actiune_thread(float* a_d, float* b_d, float *r_d, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    //if(idx < N){
    r_d[idx] = - tex1Dfetch(texRef, idx);
    //}
}

extern "C" cudaError_t launch_actiune_thread(float* a_d, float* b_d, float *r_d, int N, dim3 DIM_GRID, dim3 DIM_BLOCK)
{
    actiune_thread <<<DIM_GRID, DIM_BLOCK>>> (a_d, b_d, r_d, N);
    return cudaGetLastError();
}

extern "C" cudaError_t legare(size_t * offset, const void * devPtr, size_t size = UINT_MAX){
    cudaBindTexture(offset, texRef, devPtr, UINT_MAX);
    return cudaGetLastError();
}

extern "C" cudaError_t release(){
    cudaUnbindTexture(texRef);
    return cudaGetLastError();
}
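Both versions of this file use a legacy texture<> reference together with explicit bind/unbind calls (hipBindTexture / cudaBindTexture); that API is deprecated and has been removed in recent CUDA releases. A possible modern equivalent using a texture object bound to linear device memory is sketched below (illustrative only, error checking omitted, function names made up).

#include <cuda_runtime.h>

// Same fetch as above, but through a texture object passed as a kernel argument.
__global__ void negate_from_texobj(float* r_d, cudaTextureObject_t tex, int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) r_d[idx] = -tex1Dfetch<float>(tex, idx);
}

// Create a texture object over an existing linear device allocation of `count` floats.
cudaTextureObject_t bind_linear_float(const float* devPtr, size_t count) {
    cudaResourceDesc resDesc{};
    resDesc.resType                = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr      = const_cast<float*>(devPtr);
    resDesc.res.linear.desc        = cudaCreateChannelDesc<float>();
    resDesc.res.linear.sizeInBytes = count * sizeof(float);

    cudaTextureDesc texDesc{};
    texDesc.readMode = cudaReadModeElementType;

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr);   // pair with cudaDestroyTextureObject
    return tex;
}

For a plain linear fetch like this one, an ordinary global read (optionally through __ldg) would work just as well.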
c0bcec69177c61f2c23368ebc441e90f63aa3981.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // CUDA driver & runtime #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hipfft.h> #include <hipfftXt.h> #include "common.h" #ifdef JIT #include "ptxjit.h" #endif // JIT #define TILE_DIM 32 #define BLOCK_ROWS 8 //////////////////////////////////////////////////////////////////////////////// // Callback Implementations //////////////////////////////////////////////////////////////////////////////// __device__ hipfftReal CB_ConvertInputR(void *dataIn, size_t offset, void *callerInfo, void *sharedPtr) { char element = ((char*)dataIn)[offset]; return (hipfftReal)((float)element/127.0f); } __device__ cufftCallbackLoadR d_loadCallbackPtr = CB_ConvertInputR; __device__ void CB_ConvolveAndStoreTransposedC(void *dataOut, size_t offset, hipfftComplex element, void *callerInfo, void *sharedPtr) { hipfftComplex *filter = (hipfftComplex*)callerInfo; size_t row = offset / COMPLEX_SIGNAL_SIZE; size_t col = offset % COMPLEX_SIGNAL_SIZE; ((hipfftComplex*)dataOut)[col * BATCH_SIZE + row] = ComplexMul(element, filter[col]); } __device__ cufftCallbackStoreC d_storeCallbackPtr = CB_ConvolveAndStoreTransposedC; #ifndef JIT // postprocessing __global__ void ConvolveAndStoreTransposedC_Optimized( const hipfftComplex * __restrict__ dataIn, hipfftComplex * __restrict__ dataOut, const hipfftComplex * __restrict__ filter) { __shared__ hipfftComplex tile[TILE_DIM][TILE_DIM+1]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int yBase = blockIdx.y * TILE_DIM + threadIdx.y; if(x < COMPLEX_SIGNAL_SIZE) { for(int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int y = yBase + j; if(y >= BATCH_SIZE) break; hipfftComplex value = ComplexMul(dataIn[y * COMPLEX_SIGNAL_SIZE + x], filter[x]); tile[threadIdx.y + j][threadIdx.x] = value; } } __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; yBase = blockIdx.x * TILE_DIM + threadIdx.y; if(x < BATCH_SIZE) { for(int j = 0; j < TILE_DIM; j += 
BLOCK_ROWS) { int y = yBase + j; if(y >= COMPLEX_SIGNAL_SIZE) break; dataOut[y * BATCH_SIZE + x] = tile[threadIdx.x][threadIdx.y + j]; } } } #endif //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { struct hipDeviceProp_t properties; int device = argc > 1 ? atoi(argv[1]) : 0; checkCudaErrors(hipGetDevice(&device)); checkCudaErrors(hipGetDeviceProperties(&properties, device)); if( !(properties.major >= 2) ) { printf("This sample requires CUDA architecture SM2.0 or higher\n"); exit(EXIT_FAILURE); } // Allocate and initialize memory printf("Preparing input: %dx%d\n", BATCH_SIZE, INPUT_SIGNAL_SIZE); char *_8bit_signal; hipfftComplex *result, *filter; hipfftComplex *tmp_result; checkCudaErrors(hipMallocManaged(&_8bit_signal, sizeof(char) * INPUT_SIGNAL_SIZE * BATCH_SIZE, hipMemAttachGlobal)); checkCudaErrors(hipMallocManaged(&result, sizeof(hipfftComplex) * COMPLEX_SIGNAL_SIZE * BATCH_SIZE, hipMemAttachGlobal)); checkCudaErrors(hipMallocManaged(&tmp_result, sizeof(hipfftComplex) * COMPLEX_SIGNAL_SIZE * BATCH_SIZE, hipMemAttachGlobal)); checkCudaErrors(hipMallocManaged(&filter, sizeof(hipfftComplex) * COMPLEX_SIGNAL_SIZE, hipMemAttachGlobal)); initInputs(_8bit_signal, filter); //compute reference result for later verification printf("Computing reference solution\n"); hipfftComplex *reference = computeReference(_8bit_signal, filter); printf("Creating FFT plan\n"); hipfftHandle fftPlan; size_t workSize; checkCudaErrors(hipfftCreate(&fftPlan)); int signalSize = INPUT_SIGNAL_SIZE; checkCudaErrors(hipfftMakePlanMany(fftPlan, 1, &signalSize, 0,0,0,0,0,0, HIPFFT_R2C, BATCH_SIZE, &workSize)); /* * Retrieve address of callback functions on the device */ cufftCallbackLoadR h_loadCallbackPtr; cufftCallbackStoreC h_storeCallbackPtr; checkCudaErrors(hipMemcpyFromSymbol(&h_loadCallbackPtr, d_loadCallbackPtr, sizeof(h_loadCallbackPtr))); checkCudaErrors(hipMemcpyFromSymbol(&h_storeCallbackPtr, d_storeCallbackPtr, sizeof(h_storeCallbackPtr))); // Now associate the callbacks with the plan. 
hipfftResult status = cufftXtSetCallback(fftPlan, (void **)&h_loadCallbackPtr, CUFFT_CB_LD_REAL, 0); if (status == HIPFFT_LICENSE_ERROR) { printf("This sample requires a valid license file.\n"); printf("The file was either not found, out of date, or otherwise invalid.\n"); exit(EXIT_FAILURE); } else { checkCudaErrors(status); } //checkCudaErrors(cufftXtSetCallback(fftPlan, (void **)&h_storeCallbackPtr, CUFFT_CB_ST_COMPLEX, (void **)&filter)); #ifdef JIT hipModule_t hModule = 0; hipFunction_t hKernel = 0; CUlinkState lState; ptxJIT(argc, argv, &hModule, &hKernel, &lState); #endif //create timers hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); float elapsedTime; printf("Running %d iterations\n", ITERATIONS); #ifndef JIT checkCudaErrors(hipEventRecord(start, 0)); #endif /* * The actual Computation */ dim3 block(TILE_DIM, BLOCK_ROWS); dim3 grid((COMPLEX_SIGNAL_SIZE + block.x - 1)/block.x, (BATCH_SIZE + block.y - 1)/block.y); for(int i = 0; i < ITERATIONS; i++) { checkCudaErrors(hipfftExecR2C(fftPlan, (hipfftReal*)_8bit_signal, tmp_result)); #ifndef JIT // Step 3 hipLaunchKernelGGL(( ConvolveAndStoreTransposedC_Optimized), dim3(grid), dim3(block), 0, 0, tmp_result, result, filter); #else checkCudaErrors(hipFuncSetBlockShape(hKernel, TILE_DIM, BLOCK_ROWS, 1)); int paramOffset = 0; SET_KERNEL_PARAM(hKernel, tmp_result, paramOffset); SET_KERNEL_PARAM(hKernel, result, paramOffset); SET_KERNEL_PARAM(hKernel, filter, paramOffset); // Launch the kernel (Driver API_) checkCudaErrors(hipLaunchGrid(hKernel, (COMPLEX_SIGNAL_SIZE + block.x - 1)/block.x, (BATCH_SIZE + block.y - 1)/block.y)); printf("JIT CUDA kernel launched\n"); #endif checkCudaErrors(hipGetLastError()); } #ifndef JIT checkCudaErrors(hipEventRecord(end, 0)); checkCudaErrors(hipEventSynchronize(end)); checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, end)); printf("Time for the FFT: %fms\n", elapsedTime); #endif checkCudaErrors(hipGetLastError()); //Verify correct result if(postprocess(reference, result, COMPLEX_SIGNAL_SIZE * BATCH_SIZE)) { printf("Verification successful.\n"); } else { printf("!!! Verification Failed !!!\n"); } //Cleanup checkCudaErrors(hipfftDestroy(fftPlan)); checkCudaErrors(hipFree(_8bit_signal)); checkCudaErrors(hipFree(result)); checkCudaErrors(hipFree(filter)); checkCudaErrors(hipFree(reference)); #ifdef JIT if (hModule) { checkCudaErrors(hipModuleUnload(hModule)); hModule = 0; } #endif //clean up driver state hipDeviceReset(); printf("Done\n"); return 0; }
c0bcec69177c61f2c23368ebc441e90f63aa3981.cu
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // CUDA driver & runtime #include <cuda.h> #include <cuda_runtime.h> #include <cufft.h> #include <cufftXt.h> #include "common.h" #ifdef JIT #include "ptxjit.h" #endif // JIT #define TILE_DIM 32 #define BLOCK_ROWS 8 //////////////////////////////////////////////////////////////////////////////// // Callback Implementations //////////////////////////////////////////////////////////////////////////////// __device__ cufftReal CB_ConvertInputR(void *dataIn, size_t offset, void *callerInfo, void *sharedPtr) { char element = ((char*)dataIn)[offset]; return (cufftReal)((float)element/127.0f); } __device__ cufftCallbackLoadR d_loadCallbackPtr = CB_ConvertInputR; __device__ void CB_ConvolveAndStoreTransposedC(void *dataOut, size_t offset, cufftComplex element, void *callerInfo, void *sharedPtr) { cufftComplex *filter = (cufftComplex*)callerInfo; size_t row = offset / COMPLEX_SIGNAL_SIZE; size_t col = offset % COMPLEX_SIGNAL_SIZE; ((cufftComplex*)dataOut)[col * BATCH_SIZE + row] = ComplexMul(element, filter[col]); } __device__ cufftCallbackStoreC d_storeCallbackPtr = CB_ConvolveAndStoreTransposedC; #ifndef JIT // postprocessing __global__ void ConvolveAndStoreTransposedC_Optimized( const cufftComplex * __restrict__ dataIn, cufftComplex * __restrict__ dataOut, const cufftComplex * __restrict__ filter) { __shared__ cufftComplex tile[TILE_DIM][TILE_DIM+1]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int yBase = blockIdx.y * TILE_DIM + threadIdx.y; if(x < COMPLEX_SIGNAL_SIZE) { for(int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int y = yBase + j; if(y >= BATCH_SIZE) break; cufftComplex value = ComplexMul(dataIn[y * COMPLEX_SIGNAL_SIZE + x], filter[x]); tile[threadIdx.y + j][threadIdx.x] = value; } } __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; yBase = blockIdx.x * TILE_DIM + threadIdx.y; if(x < BATCH_SIZE) { for(int j = 0; j < TILE_DIM; j += BLOCK_ROWS) { int y = yBase + j; if(y >= COMPLEX_SIGNAL_SIZE) break; dataOut[y * 
BATCH_SIZE + x] = tile[threadIdx.x][threadIdx.y + j]; } } } #endif //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { struct cudaDeviceProp properties; int device = argc > 1 ? atoi(argv[1]) : 0; checkCudaErrors(cudaGetDevice(&device)); checkCudaErrors(cudaGetDeviceProperties(&properties, device)); if( !(properties.major >= 2) ) { printf("This sample requires CUDA architecture SM2.0 or higher\n"); exit(EXIT_FAILURE); } // Allocate and initialize memory printf("Preparing input: %dx%d\n", BATCH_SIZE, INPUT_SIGNAL_SIZE); char *_8bit_signal; cufftComplex *result, *filter; cufftComplex *tmp_result; checkCudaErrors(cudaMallocManaged(&_8bit_signal, sizeof(char) * INPUT_SIGNAL_SIZE * BATCH_SIZE, cudaMemAttachGlobal)); checkCudaErrors(cudaMallocManaged(&result, sizeof(cufftComplex) * COMPLEX_SIGNAL_SIZE * BATCH_SIZE, cudaMemAttachGlobal)); checkCudaErrors(cudaMallocManaged(&tmp_result, sizeof(cufftComplex) * COMPLEX_SIGNAL_SIZE * BATCH_SIZE, cudaMemAttachGlobal)); checkCudaErrors(cudaMallocManaged(&filter, sizeof(cufftComplex) * COMPLEX_SIGNAL_SIZE, cudaMemAttachGlobal)); initInputs(_8bit_signal, filter); //compute reference result for later verification printf("Computing reference solution\n"); cufftComplex *reference = computeReference(_8bit_signal, filter); printf("Creating FFT plan\n"); cufftHandle fftPlan; size_t workSize; checkCudaErrors(cufftCreate(&fftPlan)); int signalSize = INPUT_SIGNAL_SIZE; checkCudaErrors(cufftMakePlanMany(fftPlan, 1, &signalSize, 0,0,0,0,0,0, CUFFT_R2C, BATCH_SIZE, &workSize)); /* * Retrieve address of callback functions on the device */ cufftCallbackLoadR h_loadCallbackPtr; cufftCallbackStoreC h_storeCallbackPtr; checkCudaErrors(cudaMemcpyFromSymbol(&h_loadCallbackPtr, d_loadCallbackPtr, sizeof(h_loadCallbackPtr))); checkCudaErrors(cudaMemcpyFromSymbol(&h_storeCallbackPtr, d_storeCallbackPtr, sizeof(h_storeCallbackPtr))); // Now associate the callbacks with the plan. 
cufftResult status = cufftXtSetCallback(fftPlan, (void **)&h_loadCallbackPtr, CUFFT_CB_LD_REAL, 0); if (status == CUFFT_LICENSE_ERROR) { printf("This sample requires a valid license file.\n"); printf("The file was either not found, out of date, or otherwise invalid.\n"); exit(EXIT_FAILURE); } else { checkCudaErrors(status); } //checkCudaErrors(cufftXtSetCallback(fftPlan, (void **)&h_storeCallbackPtr, CUFFT_CB_ST_COMPLEX, (void **)&filter)); #ifdef JIT CUmodule hModule = 0; CUfunction hKernel = 0; CUlinkState lState; ptxJIT(argc, argv, &hModule, &hKernel, &lState); #endif //create timers cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); float elapsedTime; printf("Running %d iterations\n", ITERATIONS); #ifndef JIT checkCudaErrors(cudaEventRecord(start, 0)); #endif /* * The actual Computation */ dim3 block(TILE_DIM, BLOCK_ROWS); dim3 grid((COMPLEX_SIGNAL_SIZE + block.x - 1)/block.x, (BATCH_SIZE + block.y - 1)/block.y); for(int i = 0; i < ITERATIONS; i++) { checkCudaErrors(cufftExecR2C(fftPlan, (cufftReal*)_8bit_signal, tmp_result)); #ifndef JIT // Step 3 ConvolveAndStoreTransposedC_Optimized<<<grid, block>>>(tmp_result, result, filter); #else checkCudaErrors(cuFuncSetBlockShape(hKernel, TILE_DIM, BLOCK_ROWS, 1)); int paramOffset = 0; SET_KERNEL_PARAM(hKernel, tmp_result, paramOffset); SET_KERNEL_PARAM(hKernel, result, paramOffset); SET_KERNEL_PARAM(hKernel, filter, paramOffset); // Launch the kernel (Driver API_) checkCudaErrors(cuLaunchGrid(hKernel, (COMPLEX_SIGNAL_SIZE + block.x - 1)/block.x, (BATCH_SIZE + block.y - 1)/block.y)); printf("JIT CUDA kernel launched\n"); #endif checkCudaErrors(cudaGetLastError()); } #ifndef JIT checkCudaErrors(cudaEventRecord(end, 0)); checkCudaErrors(cudaEventSynchronize(end)); checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, end)); printf("Time for the FFT: %fms\n", elapsedTime); #endif checkCudaErrors(cudaGetLastError()); //Verify correct result if(postprocess(reference, result, COMPLEX_SIGNAL_SIZE * BATCH_SIZE)) { printf("Verification successful.\n"); } else { printf("!!! Verification Failed !!!\n"); } //Cleanup checkCudaErrors(cufftDestroy(fftPlan)); checkCudaErrors(cudaFree(_8bit_signal)); checkCudaErrors(cudaFree(result)); checkCudaErrors(cudaFree(filter)); checkCudaErrors(cudaFree(reference)); #ifdef JIT if (hModule) { checkCudaErrors(cuModuleUnload(hModule)); hModule = 0; } #endif //clean up driver state cudaDeviceReset(); printf("Done\n"); return 0; }
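ConvolveAndStoreTransposedC_Optimized fuses the pointwise filter multiply with a tiled shared-memory transpose; the TILE_DIM x (TILE_DIM+1) tile is the standard padding trick that keeps the column-wise read-back free of shared-memory bank conflicts. The pattern on its own, as a minimal sketch for a plain float matrix (illustrative only; launch with a (T_TILE, T_ROWS) block and a grid of ceil(width/T_TILE) x ceil(height/T_TILE) blocks).

#define T_TILE 32
#define T_ROWS 8

__global__ void transpose_tiled(const float* __restrict__ in,
                                float* __restrict__ out,
                                int width, int height) {          // `in` is height rows x width cols
    __shared__ float tile[T_TILE][T_TILE + 1];                    // +1 column avoids bank conflicts

    int x = blockIdx.x * T_TILE + threadIdx.x;
    int y = blockIdx.y * T_TILE + threadIdx.y;
    for (int j = 0; j < T_TILE; j += T_ROWS)                      // coalesced row-major loads
        if (x < width && (y + j) < height)
            tile[threadIdx.y + j][threadIdx.x] = in[(y + j) * width + x];

    __syncthreads();

    x = blockIdx.y * T_TILE + threadIdx.x;                        // swap block coordinates
    y = blockIdx.x * T_TILE + threadIdx.y;
    for (int j = 0; j < T_TILE; j += T_ROWS)                      // coalesced stores of the transpose
        if (x < height && (y + j) < width)
            out[(y + j) * height + x] = tile[threadIdx.x][threadIdx.y + j];
}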
c66785dd99c81610703d833b679e0b48d0113b25.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "diagm_kernel.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const int lengthA = 1;
            const double *a = NULL;
            hipMalloc(&a, XSIZE*YSIZE);
            double *b = NULL;
            hipMalloc(&b, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( diagm_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, lengthA,a,b);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( diagm_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, lengthA,a,b);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( diagm_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, lengthA,a,b);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
c66785dd99c81610703d833b679e0b48d0113b25.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "diagm_kernel.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const int lengthA = 1;
            const double *a = NULL;
            cudaMalloc(&a, XSIZE*YSIZE);
            double *b = NULL;
            cudaMalloc(&b, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            diagm_kernel<<<gridBlock,threadBlock>>>(lengthA,a,b);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                diagm_kernel<<<gridBlock,threadBlock>>>(lengthA,a,b);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                diagm_kernel<<<gridBlock,threadBlock>>>(lengthA,a,b);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
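One caveat in both versions of this benchmark: kernel launches are asynchronous and the steady_clock interval is closed without a device synchronization, so the printed time mostly reflects launch overhead rather than kernel execution. Below is a sketch of the usual cudaEvent-based timing for the same loop (illustrative only; diagm_kernel and its argument list are taken from the benchmark above).

#include <cuda_runtime.h>

// Time `iters` launches of diagm_kernel on the device, returning milliseconds.
float time_kernel_ms(dim3 grid, dim3 block, int lengthA, const double* a, double* b, int iters) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int i = 0; i < iters; ++i)
        diagm_kernel<<<grid, block>>>(lengthA, a, b);   // kernel from diagm_kernel.cu
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                          // wait for the recorded work to finish

    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}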
ad6c6f533f9558607a4f7b6cb471267d5c93ff21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/reduction_ops.h" #include "caffe2/utils/conversions.h" #include <hipcub/hipcub.hpp> namespace caffe2 { REGISTER_CUDA_OPERATOR(SumElements, SumElementsOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SumElementsInt, SumElementsIntOp<int, CUDAContext>); REGISTER_CUDA_OPERATOR(SumSqrElements, SumSqrElementsOp<CUDAContext>); REGISTER_CUDA_OPERATOR(RowwiseMax, MaxReductionOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR(ColwiseMax, MaxReductionOp<float, CUDAContext, false>); REGISTER_CUDA_OPERATOR( RowwiseMaxGradient, MaxReductionGradientOp<float, CUDAContext, true>) REGISTER_CUDA_OPERATOR( ColwiseMaxGradient, MaxReductionGradientOp<float, CUDAContext, false>) REGISTER_CUDA_OPERATOR( SumElementsGradient, SumElementsGradientOp<float, CUDAContext>); template <typename T> __global__ void SumElementsGradientKernel(bool average, const int N, const T* dY, T* dX) { const T value = average ? (*dY) / N : *dY; CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = value; } } __global__ void rowwise_max_gradient_kernel( const int batch_size, const int M, const int N, const float* X, const float* Y, const float* dY, float* dX) { const int input_size = M * N; CUDA_1D_KERNEL_LOOP(i, batch_size * M * N) { const int b_i = i / input_size; const int b_n = i / input_size / N; const int y_index = b_i * M + b_n; if (X[i] == Y[y_index]) { dX[i] = dY[y_index]; } else { dX[i] = 0.0; } } } template <> bool SumSqrElementsOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<float, float16>>::call(this, Input(0)); } __global__ void colwise_max_gradient_kernel( const int batch_size, const int M, const int N, const float* X, const float* Y, const float* dY, float* dX) { const int input_size = M * N; CUDA_1D_KERNEL_LOOP(i, batch_size * M * N) { const int b_i = i / input_size; const int b_n = i % input_size % N; const int y_index = b_i * N + b_n; if (X[i] == Y[y_index]) { dX[i] = dY[y_index]; } else { dX[i] = 0.0; } } } template <> bool SumElementsGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& dY = Input(1); DCHECK_EQ(dY.size(), 1); auto* dX = Output(0); dX->ResizeLike(X); hipLaunchKernelGGL(( SumElementsGradientKernel<float>), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), average_, X.size(), dY.data<float>(), dX->mutable_data<float>()); return true; } template <typename T, class Context, bool ROWWISE> bool MaxReductionGradientOp<T, Context, ROWWISE>::RunOnDevice() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); auto* dX = Output(0); dX->ResizeLike(X); CAFFE_ENFORCE_EQ(X.ndim(), 3); const int batch_size = X.dim32(0); const int M = X.dim32(1); const int N = X.dim32(2); const T* Xdata = X.template data<T>(); const T* Ydata = Y.template data<T>(); const T* dYdata = dY.template data<T>(); T* dXdata = dX->template mutable_data<T>(); const int input_size = M * N; if (ROWWISE) { hipLaunchKernelGGL(( rowwise_max_gradient_kernel), dim3(CAFFE_GET_BLOCKS(batch_size * input_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), batch_size, M, N, Xdata, Ydata, dYdata, dXdata); } else { hipLaunchKernelGGL(( colwise_max_gradient_kernel), dim3(CAFFE_GET_BLOCKS(batch_size * input_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), batch_size, M, N, Xdata, Ydata, dYdata, dXdata); } return true; } } // namespace caffe2
ad6c6f533f9558607a4f7b6cb471267d5c93ff21.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/reduction_ops.h" #include "caffe2/utils/conversions.h" #include <cub/cub.cuh> namespace caffe2 { REGISTER_CUDA_OPERATOR(SumElements, SumElementsOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SumElementsInt, SumElementsIntOp<int, CUDAContext>); REGISTER_CUDA_OPERATOR(SumSqrElements, SumSqrElementsOp<CUDAContext>); REGISTER_CUDA_OPERATOR(RowwiseMax, MaxReductionOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR(ColwiseMax, MaxReductionOp<float, CUDAContext, false>); REGISTER_CUDA_OPERATOR( RowwiseMaxGradient, MaxReductionGradientOp<float, CUDAContext, true>) REGISTER_CUDA_OPERATOR( ColwiseMaxGradient, MaxReductionGradientOp<float, CUDAContext, false>) REGISTER_CUDA_OPERATOR( SumElementsGradient, SumElementsGradientOp<float, CUDAContext>); template <typename T> __global__ void SumElementsGradientKernel(bool average, const int N, const T* dY, T* dX) { const T value = average ? (*dY) / N : *dY; CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = value; } } __global__ void rowwise_max_gradient_kernel( const int batch_size, const int M, const int N, const float* X, const float* Y, const float* dY, float* dX) { const int input_size = M * N; CUDA_1D_KERNEL_LOOP(i, batch_size * M * N) { const int b_i = i / input_size; const int b_n = i / input_size / N; const int y_index = b_i * M + b_n; if (X[i] == Y[y_index]) { dX[i] = dY[y_index]; } else { dX[i] = 0.0; } } } template <> bool SumSqrElementsOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<float, float16>>::call(this, Input(0)); } __global__ void colwise_max_gradient_kernel( const int batch_size, const int M, const int N, const float* X, const float* Y, const float* dY, float* dX) { const int input_size = M * N; CUDA_1D_KERNEL_LOOP(i, batch_size * M * N) { const int b_i = i / input_size; const int b_n = i % input_size % N; const int y_index = b_i * N + b_n; if (X[i] == Y[y_index]) { dX[i] = dY[y_index]; } else { dX[i] = 0.0; } } } template <> bool SumElementsGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& dY = Input(1); DCHECK_EQ(dY.size(), 1); auto* dX = Output(0); dX->ResizeLike(X); SumElementsGradientKernel<float><<< CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( average_, X.size(), dY.data<float>(), dX->mutable_data<float>()); return true; } template <typename T, class Context, bool ROWWISE> bool MaxReductionGradientOp<T, Context, ROWWISE>::RunOnDevice() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); auto* dX = Output(0); dX->ResizeLike(X); CAFFE_ENFORCE_EQ(X.ndim(), 3); const int batch_size = X.dim32(0); const int M = X.dim32(1); const int N = X.dim32(2); const T* Xdata = X.template data<T>(); const T* Ydata = Y.template data<T>(); const T* dYdata = dY.template data<T>(); T* dXdata = dX->template mutable_data<T>(); const int input_size = M * N; if (ROWWISE) { rowwise_max_gradient_kernel<<< CAFFE_GET_BLOCKS(batch_size * input_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( batch_size, M, N, Xdata, Ydata, dYdata, dXdata); } else { colwise_max_gradient_kernel<<< CAFFE_GET_BLOCKS(batch_size * input_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( batch_size, M, N, Xdata, Ydata, dYdata, dXdata); } return true; } } // namespace caffe2
0f657187c4f6a2236c4fff8e9a5dc2edfd7630ca.hip
// !!! This is a file automatically generated by hipify!!! // -I/data/vin/cmssw/slc7_amd64_gcc700/external/cub/1.8.0-gnimlf2/include/ // -I/cvmfs/cms.cern.ch/slc7_amd64_gcc630/external/cub/1.8.0-gnimlf2/include // [email protected] ; [email protected] // [email protected] #include <stdlib.h> #include <stddef.h> #include <string.h> #include <stdio.h> #include <assert.h> #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #ifdef USECUB #include "hipcub/hipcub.hpp" #endif // profile cuda kernels #define CUDA_PROFILING /*enable profiling */ #define CUDA_MAX_STREAMS 3 #ifdef CUDA_PROFILING #define INIT_CUDA_PROFILER \ hipEvent_t cuda_start_time[CUDA_MAX_STREAMS]; \ hipEvent_t cuda_stop_time[CUDA_MAX_STREAMS]; \ float cuda_run_time[CUDA_MAX_STREAMS]; \ int cuda_iter; \ for(cuda_iter=0; cuda_iter<CUDA_MAX_STREAMS; cuda_iter++) \ { \ hipEventCreate(&cuda_start_time[cuda_iter]); \ hipEventCreate(&cuda_stop_time[cuda_iter]); \ } #define DESTROY_CUDA_PROFILER \ for(cuda_iter=0; cuda_iter<CUDA_MAX_STREAMS; cuda_iter++) \ { \ hipEventDestroy( cuda_start_time[cuda_iter] ); \ hipEventDestroy( cuda_stop_time[cuda_iter] ); \ } #define START_CUDA_TIMING(stream) \ hipEventRecord( cuda_start_time[stream], stream ); #define STOP_CUDA_TIMING(stream) \ hipEventRecord( cuda_stop_time[stream], stream ); \ hipEventSynchronize( cuda_stop_time[stream] ); \ hipEventElapsedTime( &cuda_run_time[stream], cuda_start_time[stream], cuda_stop_time[stream] ); #define GET_CUDA_TIMING(stream,time) time=cuda_run_time[stream]; #define GET_CUDA_BANDWIDTH(stream, bytes_read, bytes_written, bandwidth) \ bandwidth=1.0e-6*(bytes_read+bytes_written)/cuda_run_time[stream]; // cuda time in milliseconds, want result in GB but start w/ Byte total #else #define INIT_CUDA_PROFILER #define DESTROY_CUDA_PROFILER #define START_CUDA_TIMING(stream) #define STOP_CUDA_TIMING(stream) #define GET_CUDA_TIMING(stream,time) #define GET_CUDA_BANDWIDTH(stream, bytes_read, bytes_written, bandwidth) #endif // +-------------------------------+ //----------------------| Reduce with Shuffling Kernels |---------------------- // +-------------------------------+ __device__ double atomicAddDouble(double* address, double val) { #if __CUDA_ARCH__ < 600 unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); #else return atomicAdd(address,val); #endif } __device__ inline double __shfl_down_double(double var, unsigned int srcLane, int width=32) { return __shfl_down_sync(0xffffffff,var, srcLane, width); /* int2 a = *reinterpret_cast<int2*>(&var); a.x = __shfl_down(a.x, srcLane, width); a.y = __shfl_down(a.y, srcLane, width); return *reinterpret_cast<double*>(&a); */ } /* Reduce values within a warp * After execution, thread 0 has the total reduced value in it's variable */ __inline__ __device__ double warpReduceSum(double val) { for (int offset = warpSize/2; offset > 0; offset /= 2) val += __shfl_down_double(val, offset); return val; } /* Reduce values within a block * First, reduce values within each warp, then the first thread of * each warp writes its partial sum to shared memory. Finally, * after synchronizing, the first warp reads from shared memory and * reduces again. 
*/ __inline__ __device__ double blockReduceSum(double val) { static __shared__ double shared[32]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0; if (wid==0) val = warpReduceSum(val); //Final reduce within first warp return val; } /* Reduce across a complete grid. * Use a grid stride loop. The first pass generates and stores * partial reduction results. The second reduces the partial results * into a single total. */ __global__ void deviceReduceKernel(double *in, double* out, int N) { double sum = 0.0; //reduce multiple elements per thread for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { sum += in[i]; } sum = blockReduceSum(sum); if (threadIdx.x==0) out[blockIdx.x]=sum; } void deviceReduce(double *in, double* out, int N) { int threads = 512; int blocks = min((N + threads - 1) / threads, 1024); hipLaunchKernelGGL(( deviceReduceKernel), dim3(blocks), dim3(threads), 0, 0, in, out, N); hipLaunchKernelGGL(( deviceReduceKernel), dim3(1), dim3(1024), 0, 0, out, out, blocks); } __global__ void deviceReduceKernelVector2(double * in, double * out, int N) { double sum = 0.0; int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i = idx; i < N / 2; i += blockDim.x * gridDim.x) { double2 val = reinterpret_cast<double2 *>(in)[ i ]; sum += val.x + val.y; } int i = idx + N / 2 * 2; if (i < N) sum += in[ i ]; sum = blockReduceSum(sum); if (threadIdx.x == 0) out[ blockIdx.x ] = sum; } void deviceReduceVector2(double * in, double * out, int N) { int threads = 512; int blocks = min((N / 2 + threads - 1) / threads, 1024); hipLaunchKernelGGL(( deviceReduceKernelVector2), dim3(blocks), dim3(threads), 0, 0, in, out, N); hipLaunchKernelGGL(( deviceReduceKernel), dim3(1), dim3(1024), 0, 0, out, out, blocks); } __global__ void deviceReduceKernelVector4(double * in, double * out, int N) { double sum = 0.0; int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i = idx; i < N / 4; i += blockDim.x * gridDim.x) { double4 val = reinterpret_cast<double4 *>(in)[ i ]; sum += (val.x + val.y) + (val.z + val.w); } int i = idx + N / 4 * 4; if (i < N) sum += in[ i ]; sum = blockReduceSum(sum); if (threadIdx.x == 0) out[ blockIdx.x ] = sum; } void deviceReduceVector4(double * in, double * out, int N) { int threads = 512; int blocks = min((N / 4 + threads - 1) / threads, 1024); hipLaunchKernelGGL(( deviceReduceKernelVector4), dim3(blocks), dim3(threads), 0, 0, in, out, N); hipLaunchKernelGGL(( deviceReduceKernel), dim3(1), dim3(1024), 0, 0, out, out, blocks); } /* Reduce across a complete grid using Atomic operations. * Reduce across the warp using shuffle, then have the first thread * of each warp atomically update the reduced value. 
*/ __global__ void deviceReduceWarpAtomicKernel(double * in, double * out, int N) { double sum = double(0.0); for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { sum += in[i]; } sum = warpReduceSum(sum); if (threadIdx.x % warpSize == 0) atomicAddDouble(out, sum); } void deviceReduceWarpAtomic(double *in, double * out, int N) { int threads=256; int blocks=min((N+threads-1)/threads,2048); hipMemsetAsync(out, 0, sizeof(double)); hipLaunchKernelGGL(( deviceReduceWarpAtomicKernel), dim3(blocks),dim3(threads), 0, 0, in,out,N); } /* Reduce across a complete grid using Atomic operations. * Reduce across the warp using shuffle, then have the first thread * of each warp atomically update the reduced value. */ __global__ void deviceReduceWarpAtomicKernelVector2(double * in, double * out, int N) { double sum = double(0.0); int idx = blockIdx.x * blockDim.x + threadIdx.x; for(int i = idx; i < N / 2; i += blockDim.x * gridDim.x) { double2 val = reinterpret_cast<double2 *>(in)[ i ]; sum += val.x + val.y; } int i = idx + N / 2 * 2; if (i < N) sum += in[ i ]; sum = warpReduceSum(sum); if (threadIdx.x % warpSize == 0) atomicAddDouble(out, sum); } void deviceReduceWarpAtomicVector2(double *in, double * out, int N) { int threads=256; int blocks=min((N/2+threads-1)/threads,2048); hipMemsetAsync(out, 0, sizeof(double)); hipLaunchKernelGGL(( deviceReduceWarpAtomicKernelVector2), dim3(blocks),dim3(threads), 0, 0, in,out,N); } /* Reduce across a complete grid using Atomic operations. * Reduce across the warp using shuffle, then have the first thread * of each warp atomically update the reduced value. */ __global__ void deviceReduceWarpAtomicKernelVector4(double * in, double * out, int N) { double sum = double(0.0); int idx = blockIdx.x * blockDim.x + threadIdx.x; for(int i = idx; i < N / 4; i += blockDim.x * gridDim.x) { double4 val = reinterpret_cast<double4 *>(in)[ i ]; sum += (val.x + val.y) + (val.z + val.w); } int i = idx + N / 4 * 4; if (i < N) sum += in[ i ]; sum = warpReduceSum(sum); if (threadIdx.x % warpSize == 0) atomicAddDouble(out, sum); } void deviceReduceWarpAtomicVector4(double *in, double * out, int N) { int threads=256; int blocks=min((N/4+threads-1)/threads,2048); hipMemsetAsync(out, 0, sizeof(double)); hipLaunchKernelGGL(( deviceReduceWarpAtomicKernelVector4), dim3(blocks),dim3(threads), 0, 0, in,out,N); } // +-------------------------------+ //----------------------| End Adam's Reduction Kernels |---------------------- // +-------------------------------+ int opt_threads(int new_blocks, int threads, int current_size) { int new_threads; if ( new_blocks == 1 ) { new_threads = 2; while ( new_threads < threads ) { if ( new_threads >= current_size ) break; new_threads *= 2 ; } } else new_threads = threads ; return new_threads; } template <unsigned int blockSize> __device__ void warpReduce(volatile double *sdata, unsigned int tid) {// extended to block size 64 for warp case --factors of two here if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; //from 64 to 32 if ( tid < 16 ) sdata[tid] += sdata[tid + 16]; // from 32 to 16 if ( tid < 8 ) sdata[tid] += sdata[tid + 8]; // from 16 to 8 if ( tid < 4 ) sdata[tid] += sdata[tid + 4]; // from 8 to 4 if ( tid < 2 ) sdata[tid] += sdata[tid + 2]; // from 4 to 2 if ( tid == 0 ) sdata[tid] += sdata[tid + 1]; // from 2 to 1 // ... 
finished /* if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; */ } template <unsigned int blockSize> __global__ void __reduce_kernel__(double *g_idata, double *g_odata, int n) { extern __shared__ double sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; unsigned int loff=i+blockDim.x; if ( loff < n ) sdata[tid] = g_idata[i] + g_idata[loff]; //let these threads load more than a single element else if ( i < n ) sdata[tid] = g_idata[i]; else sdata[tid] = (double)(0.0); __syncthreads(); if (blockSize >= 1024) { if (tid < 512) { sdata[tid] += sdata[tid + 512]; } __syncthreads(); } if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if (tid < 32) warpReduce<blockSize> (sdata, tid) ; if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } void call_reduction_kernel(int blocks, int threads, int size, double *d_idata, double *d_odata) { //1st call: call_reduction_kernel(blocks, lthreads, size, array, partial_sums); int smemSize = threads * sizeof(double); switch ( threads ) { case 1024: hipLaunchKernelGGL(( __reduce_kernel__<1024>), dim3(blocks), dim3(threads), smemSize , 0, d_idata, d_odata, size); break; case 512: hipLaunchKernelGGL(( __reduce_kernel__< 512>), dim3(blocks), dim3(threads), smemSize , 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( __reduce_kernel__< 256>), dim3(blocks), dim3(threads), smemSize , 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( __reduce_kernel__< 128>), dim3(blocks), dim3(threads), smemSize , 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( __reduce_kernel__< 64>), dim3(blocks), dim3(threads), smemSize , 0, d_idata, d_odata, size); break; } } /* function does fast reduction (sum of elements) of array. result is located in partial_sums[0]. 
IF partial_sums == array then array contents will be destroyed */ int local_reduction( double * array , int size , double * partial_sums , int blocks , int threads ) { //call: local_reduction(gpu_workbuf, nxyz, gpu_workbuf, gpu_blocks, gpu_threads); // routine goes back and forth between the host and device unsigned int new_blocks, current_size; unsigned int lthreads = threads / 2 ; // threads should be power of 2 if ( lthreads < 64 ) lthreads = 64 ; //at least 2*warp_size // First reduction of the array call_reduction_kernel(blocks, lthreads, size, array, partial_sums); // Do iteratively reduction of partial_sums current_size = blocks; while ( current_size > 1 ) { new_blocks = (int)ceil((float)current_size/threads); lthreads = opt_threads( new_blocks , threads , current_size ) / 2 ; if ( lthreads < 64 ) lthreads=64; // at least 2*warp_size call_reduction_kernel( new_blocks , lthreads , current_size , partial_sums , partial_sums ) ; current_size = new_blocks ; } return 0; } int main ( int argc , char ** argv ) { int nxyz=256*256*256; hipError_t err; if (argc==2) nxyz = atoi(argv[1]); int gpu_threads=512; int gpu_blocks=(int)ceil((float)nxyz/gpu_threads); printf("GPU SETTING: THREADS=%d, BLOCKS=%d, THREADS*BLOCKS=%d, nxyz=%d\n",gpu_threads,gpu_blocks,gpu_threads*gpu_blocks,nxyz); // Buffers double * workbuf; double * gpu_workbuf; err = hipHostMalloc( (void **)&workbuf , nxyz*sizeof(double), hipHostMallocDefault ); if ( err!= hipSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__ , __FILE__ ) ; return 1; } err = hipMalloc( (void **)&gpu_workbuf , nxyz*sizeof(double) ); if ( err!= hipSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__ , __FILE__ ) ; return 1; } // Fill buffer with determnistic numbers double step = 1. ; int i ; for ( i=0; i<nxyz; i++ ) workbuf[i]=step*(i+1); // no cast here err = hipMemcpy( gpu_workbuf , workbuf , nxyz*sizeof(double) , hipMemcpyHostToDevice ); if ( err != hipSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } // gpu reduction double gpu_redu ; double *gpu_reduction_tmp ; err = hipMalloc( (void **)&gpu_reduction_tmp , gpu_blocks*sizeof(double) ); if ( err != hipSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } INIT_CUDA_PROFILER; // do reduction START_CUDA_TIMING(0); //local_reduction(gpu_workbuf, nxyz, gpu_workbuf, gpu_blocks, gpu_threads); // routine goes back and forth between the host and device local_reduction(gpu_workbuf, nxyz, gpu_reduction_tmp, gpu_blocks, gpu_threads); // routine goes back and forth between the host and device STOP_CUDA_TIMING(0); // gather timing results double cuda_time , cuda_bandwidth ; GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("\n***Original reduce6 routines ***\n"); printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=hipMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , hipMemcpyDeviceToHost ); err=hipMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , hipMemcpyDeviceToHost ); if(err!= hipSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); // Do it all over again for the reduce with shuffle printf("\n***With Shuffling ***\n"); printf("Now testing reduction with warp shuffle\n"); err = hipMemcpy( gpu_workbuf , workbuf , nxyz*sizeof(double) , hipMemcpyHostToDevice ); if ( 
err != hipSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } // do reduction START_CUDA_TIMING(0); deviceReduce(gpu_workbuf, gpu_reduction_tmp, nxyz); STOP_CUDA_TIMING(0); int threads = 512; int blocks = min((nxyz + threads - 1) / threads, 1024); // gather timing results GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=hipMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , hipMemcpyDeviceToHost ); err=hipMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , hipMemcpyDeviceToHost ); if(err!= hipSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); // Do it all over again for the reduce with shuffle and vectorization printf("Now testing reduction with warp shuffle and double2 vectorization\n"); err = hipMemcpy( gpu_workbuf , workbuf , nxyz*sizeof(double) , hipMemcpyHostToDevice ); if ( err != hipSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } // do reduction START_CUDA_TIMING(0); deviceReduceVector2(gpu_workbuf, gpu_reduction_tmp, nxyz); STOP_CUDA_TIMING(0); threads = 512; blocks = min((nxyz/2 + threads - 1) / threads, 1024); // gather timing results GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=hipMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , hipMemcpyDeviceToHost ); err=hipMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , hipMemcpyDeviceToHost ); if(err!= hipSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); // Do it all over again for the reduce with shuffle and vectorization printf("Now testing reduction with warp shuffle and double4 vectorization\n"); err = hipMemcpy( gpu_workbuf , workbuf , nxyz*sizeof(double) , hipMemcpyHostToDevice ); if ( err != hipSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } // do reduction START_CUDA_TIMING(0); deviceReduceVector4(gpu_workbuf, gpu_reduction_tmp, nxyz); STOP_CUDA_TIMING(0); threads = 512; blocks = min((nxyz/4 + threads - 1) / threads, 1024); // gather timing results GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=hipMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , hipMemcpyDeviceToHost ); err=hipMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , hipMemcpyDeviceToHost ); if(err!= hipSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); // Do it all over again for the reduce with shuffle and atomics printf("\n*** With Atomics ***\n"); printf("Now testing reduction with warp shuffle and atomics\n"); err = hipMemcpy( gpu_workbuf , workbuf , nxyz*sizeof(double) , hipMemcpyHostToDevice ); if ( err != hipSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, 
__FILE__); return 1; } // do reduction START_CUDA_TIMING(0); deviceReduceWarpAtomic(gpu_workbuf, gpu_reduction_tmp, nxyz); STOP_CUDA_TIMING(0); threads = 512; blocks = min((nxyz/4 + threads - 1) / threads, 1024); // gather timing results GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=hipMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , hipMemcpyDeviceToHost ); err=hipMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , hipMemcpyDeviceToHost ); if(err!= hipSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); // Do it all over again for the reduce with shuffle and atomics and vector2 printf("Now testing reduction with warp shuffle and atomics and double2\n"); err = hipMemcpy( gpu_workbuf , workbuf , nxyz*sizeof(double) , hipMemcpyHostToDevice ); if ( err != hipSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } // do reduction START_CUDA_TIMING(0); deviceReduceWarpAtomicVector2(gpu_workbuf, gpu_reduction_tmp, nxyz); STOP_CUDA_TIMING(0); threads = 512; blocks = min((nxyz/4 + threads - 1) / threads, 1024); // gather timing results GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=hipMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , hipMemcpyDeviceToHost ); err=hipMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , hipMemcpyDeviceToHost ); if(err!= hipSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); // Do it all over again for the reduce with shuffle and atomics and vector4 printf("Now testing reduction with warp shuffle and atomics and double4\n"); err = hipMemcpy( gpu_workbuf , workbuf , nxyz*sizeof(double) , hipMemcpyHostToDevice ); if ( err != hipSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } // do reduction START_CUDA_TIMING(0); deviceReduceWarpAtomicVector4(gpu_workbuf, gpu_reduction_tmp, nxyz); STOP_CUDA_TIMING(0); threads = 512; blocks = min((nxyz/4 + threads - 1) / threads, 1024); // gather timing results GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=hipMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , hipMemcpyDeviceToHost ); err=hipMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , hipMemcpyDeviceToHost ); if(err!= hipSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); #ifdef USECUB printf("\n*** Now with CUB ***\n"); size_t temp_storage_bytes; double * temp_storage = NULL; hipcub::DeviceReduce::Reduce(temp_storage, temp_storage_bytes, gpu_workbuf, gpu_reduction_tmp, nxyz, hipcub::Sum(),0); hipMalloc(&temp_storage, temp_storage_bytes); hipDeviceSynchronize(); START_CUDA_TIMING(0); hipcub::DeviceReduce::Reduce(temp_storage, 
temp_storage_bytes, gpu_workbuf, gpu_reduction_tmp, nxyz, hipcub::Sum(),0); STOP_CUDA_TIMING(0); threads = 512; blocks = min((nxyz/4 + threads - 1) / threads, 1024); // gather timing results GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=hipMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , hipMemcpyDeviceToHost ); err=hipMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , hipMemcpyDeviceToHost ); if(err!= hipSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); #endif DESTROY_CUDA_PROFILER ; return 0 ; }
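The file above is the hipify-generated port of the CUDA reduction benchmark that follows; apart from the API renames (cudaHostAlloc to hipHostMalloc, cudaMemcpy to hipMemcpy, cub to hipcub, and so on) the two are identical. Both repeat the same inline error check after every runtime call; a minimal sketch of how that pattern is usually factored into a helper macro is shown below. The macro name CHECK_CUDA is an illustrative assumption, not part of either file, and the same shape works for the HIP port with the cuda* calls replaced by their hip* counterparts.

/* Minimal sketch of an error-check helper for the pattern repeated above.
   CHECK_CUDA is an illustrative name, not part of the benchmark. */
#include <stdio.h>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                              \
    do {                                                              \
        cudaError_t e_ = (call);                                      \
        if (e_ != cudaSuccess) {                                      \
            printf("ERROR: %s (line %d in %s)\n",                     \
                   cudaGetErrorString(e_), __LINE__, __FILE__);       \
            return 1;                                                 \
        }                                                             \
    } while (0)

/* Usage, mirroring the allocations in main() above:
   CHECK_CUDA(cudaMalloc((void **)&gpu_workbuf, nxyz * sizeof(double))); */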
0f657187c4f6a2236c4fff8e9a5dc2edfd7630ca.cu
// -I/data/vin/cmssw/slc7_amd64_gcc700/external/cub/1.8.0-gnimlf2/include/ // -I/cvmfs/cms.cern.ch/slc7_amd64_gcc630/external/cub/1.8.0-gnimlf2/include // [email protected] ; [email protected] // [email protected] #include <stdlib.h> #include <stddef.h> #include <string.h> #include <stdio.h> #include <assert.h> #include <math.h> #include <cuda.h> #include <cuda_runtime.h> #ifdef USECUB #include "cub/cub.cuh" #endif // profile cuda kernels #define CUDA_PROFILING /*enable profiling */ #define CUDA_MAX_STREAMS 3 #ifdef CUDA_PROFILING #define INIT_CUDA_PROFILER \ cudaEvent_t cuda_start_time[CUDA_MAX_STREAMS]; \ cudaEvent_t cuda_stop_time[CUDA_MAX_STREAMS]; \ float cuda_run_time[CUDA_MAX_STREAMS]; \ int cuda_iter; \ for(cuda_iter=0; cuda_iter<CUDA_MAX_STREAMS; cuda_iter++) \ { \ cudaEventCreate(&cuda_start_time[cuda_iter]); \ cudaEventCreate(&cuda_stop_time[cuda_iter]); \ } #define DESTROY_CUDA_PROFILER \ for(cuda_iter=0; cuda_iter<CUDA_MAX_STREAMS; cuda_iter++) \ { \ cudaEventDestroy( cuda_start_time[cuda_iter] ); \ cudaEventDestroy( cuda_stop_time[cuda_iter] ); \ } #define START_CUDA_TIMING(stream) \ cudaEventRecord( cuda_start_time[stream], stream ); #define STOP_CUDA_TIMING(stream) \ cudaEventRecord( cuda_stop_time[stream], stream ); \ cudaEventSynchronize( cuda_stop_time[stream] ); \ cudaEventElapsedTime( &cuda_run_time[stream], cuda_start_time[stream], cuda_stop_time[stream] ); #define GET_CUDA_TIMING(stream,time) time=cuda_run_time[stream]; #define GET_CUDA_BANDWIDTH(stream, bytes_read, bytes_written, bandwidth) \ bandwidth=1.0e-6*(bytes_read+bytes_written)/cuda_run_time[stream]; // cuda time in milliseconds, want result in GB but start w/ Byte total #else #define INIT_CUDA_PROFILER #define DESTROY_CUDA_PROFILER #define START_CUDA_TIMING(stream) #define STOP_CUDA_TIMING(stream) #define GET_CUDA_TIMING(stream,time) #define GET_CUDA_BANDWIDTH(stream, bytes_read, bytes_written, bandwidth) #endif // +-------------------------------+ //----------------------| Reduce with Shuffling Kernels |---------------------- // +-------------------------------+ __device__ double atomicAddDouble(double* address, double val) { #if __CUDA_ARCH__ < 600 unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); #else return atomicAdd(address,val); #endif } __device__ inline double __shfl_down_double(double var, unsigned int srcLane, int width=32) { return __shfl_down_sync(0xffffffff,var, srcLane, width); /* int2 a = *reinterpret_cast<int2*>(&var); a.x = __shfl_down(a.x, srcLane, width); a.y = __shfl_down(a.y, srcLane, width); return *reinterpret_cast<double*>(&a); */ } /* Reduce values within a warp * After execution, thread 0 has the total reduced value in it's variable */ __inline__ __device__ double warpReduceSum(double val) { for (int offset = warpSize/2; offset > 0; offset /= 2) val += __shfl_down_double(val, offset); return val; } /* Reduce values within a block * First, reduce values within each warp, then the first thread of * each warp writes its partial sum to shared memory. Finally, * after synchronizing, the first warp reads from shared memory and * reduces again. 
*/ __inline__ __device__ double blockReduceSum(double val) { static __shared__ double shared[32]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0; if (wid==0) val = warpReduceSum(val); //Final reduce within first warp return val; } /* Reduce across a complete grid. * Use a grid stride loop. The first pass generates and stores * partial reduction results. The second reduces the partial results * into a single total. */ __global__ void deviceReduceKernel(double *in, double* out, int N) { double sum = 0.0; //reduce multiple elements per thread for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { sum += in[i]; } sum = blockReduceSum(sum); if (threadIdx.x==0) out[blockIdx.x]=sum; } void deviceReduce(double *in, double* out, int N) { int threads = 512; int blocks = min((N + threads - 1) / threads, 1024); deviceReduceKernel<<<blocks, threads>>>(in, out, N); deviceReduceKernel<<<1, 1024>>>(out, out, blocks); } __global__ void deviceReduceKernelVector2(double * in, double * out, int N) { double sum = 0.0; int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i = idx; i < N / 2; i += blockDim.x * gridDim.x) { double2 val = reinterpret_cast<double2 *>(in)[ i ]; sum += val.x + val.y; } int i = idx + N / 2 * 2; if (i < N) sum += in[ i ]; sum = blockReduceSum(sum); if (threadIdx.x == 0) out[ blockIdx.x ] = sum; } void deviceReduceVector2(double * in, double * out, int N) { int threads = 512; int blocks = min((N / 2 + threads - 1) / threads, 1024); deviceReduceKernelVector2<<<blocks, threads>>>(in, out, N); deviceReduceKernel<<<1, 1024>>>(out, out, blocks); } __global__ void deviceReduceKernelVector4(double * in, double * out, int N) { double sum = 0.0; int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i = idx; i < N / 4; i += blockDim.x * gridDim.x) { double4 val = reinterpret_cast<double4 *>(in)[ i ]; sum += (val.x + val.y) + (val.z + val.w); } int i = idx + N / 4 * 4; if (i < N) sum += in[ i ]; sum = blockReduceSum(sum); if (threadIdx.x == 0) out[ blockIdx.x ] = sum; } void deviceReduceVector4(double * in, double * out, int N) { int threads = 512; int blocks = min((N / 4 + threads - 1) / threads, 1024); deviceReduceKernelVector4<<<blocks, threads>>>(in, out, N); deviceReduceKernel<<<1, 1024>>>(out, out, blocks); } /* Reduce across a complete grid using Atomic operations. * Reduce across the warp using shuffle, then have the first thread * of each warp atomically update the reduced value. */ __global__ void deviceReduceWarpAtomicKernel(double * in, double * out, int N) { double sum = double(0.0); for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { sum += in[i]; } sum = warpReduceSum(sum); if (threadIdx.x % warpSize == 0) atomicAddDouble(out, sum); } void deviceReduceWarpAtomic(double *in, double * out, int N) { int threads=256; int blocks=min((N+threads-1)/threads,2048); cudaMemsetAsync(out, 0, sizeof(double)); deviceReduceWarpAtomicKernel<<<blocks,threads>>>(in,out,N); } /* Reduce across a complete grid using Atomic operations. * Reduce across the warp using shuffle, then have the first thread * of each warp atomically update the reduced value. 
*/ __global__ void deviceReduceWarpAtomicKernelVector2(double * in, double * out, int N) { double sum = double(0.0); int idx = blockIdx.x * blockDim.x + threadIdx.x; for(int i = idx; i < N / 2; i += blockDim.x * gridDim.x) { double2 val = reinterpret_cast<double2 *>(in)[ i ]; sum += val.x + val.y; } int i = idx + N / 2 * 2; if (i < N) sum += in[ i ]; sum = warpReduceSum(sum); if (threadIdx.x % warpSize == 0) atomicAddDouble(out, sum); } void deviceReduceWarpAtomicVector2(double *in, double * out, int N) { int threads=256; int blocks=min((N/2+threads-1)/threads,2048); cudaMemsetAsync(out, 0, sizeof(double)); deviceReduceWarpAtomicKernelVector2<<<blocks,threads>>>(in,out,N); } /* Reduce across a complete grid using Atomic operations. * Reduce across the warp using shuffle, then have the first thread * of each warp atomically update the reduced value. */ __global__ void deviceReduceWarpAtomicKernelVector4(double * in, double * out, int N) { double sum = double(0.0); int idx = blockIdx.x * blockDim.x + threadIdx.x; for(int i = idx; i < N / 4; i += blockDim.x * gridDim.x) { double4 val = reinterpret_cast<double4 *>(in)[ i ]; sum += (val.x + val.y) + (val.z + val.w); } int i = idx + N / 4 * 4; if (i < N) sum += in[ i ]; sum = warpReduceSum(sum); if (threadIdx.x % warpSize == 0) atomicAddDouble(out, sum); } void deviceReduceWarpAtomicVector4(double *in, double * out, int N) { int threads=256; int blocks=min((N/4+threads-1)/threads,2048); cudaMemsetAsync(out, 0, sizeof(double)); deviceReduceWarpAtomicKernelVector4<<<blocks,threads>>>(in,out,N); } // +-------------------------------+ //----------------------| End Adam's Reduction Kernels |---------------------- // +-------------------------------+ int opt_threads(int new_blocks, int threads, int current_size) { int new_threads; if ( new_blocks == 1 ) { new_threads = 2; while ( new_threads < threads ) { if ( new_threads >= current_size ) break; new_threads *= 2 ; } } else new_threads = threads ; return new_threads; } template <unsigned int blockSize> __device__ void warpReduce(volatile double *sdata, unsigned int tid) {// extended to block size 64 for warp case --factors of two here if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; //from 64 to 32 if ( tid < 16 ) sdata[tid] += sdata[tid + 16]; // from 32 to 16 if ( tid < 8 ) sdata[tid] += sdata[tid + 8]; // from 16 to 8 if ( tid < 4 ) sdata[tid] += sdata[tid + 4]; // from 8 to 4 if ( tid < 2 ) sdata[tid] += sdata[tid + 2]; // from 4 to 2 if ( tid == 0 ) sdata[tid] += sdata[tid + 1]; // from 2 to 1 // ... 
finished /* if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; */ } template <unsigned int blockSize> __global__ void __reduce_kernel__(double *g_idata, double *g_odata, int n) { extern __shared__ double sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; unsigned int loff=i+blockDim.x; if ( loff < n ) sdata[tid] = g_idata[i] + g_idata[loff]; //let these threads load more than a single element else if ( i < n ) sdata[tid] = g_idata[i]; else sdata[tid] = (double)(0.0); __syncthreads(); if (blockSize >= 1024) { if (tid < 512) { sdata[tid] += sdata[tid + 512]; } __syncthreads(); } if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if (tid < 32) warpReduce<blockSize> (sdata, tid) ; if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } void call_reduction_kernel(int blocks, int threads, int size, double *d_idata, double *d_odata) { //1st call: call_reduction_kernel(blocks, lthreads, size, array, partial_sums); int smemSize = threads * sizeof(double); switch ( threads ) { case 1024: __reduce_kernel__<1024><<< blocks, threads, smemSize >>>(d_idata, d_odata, size); break; case 512: __reduce_kernel__< 512><<< blocks, threads, smemSize >>>(d_idata, d_odata, size); break; case 256: __reduce_kernel__< 256><<< blocks, threads, smemSize >>>(d_idata, d_odata, size); break; case 128: __reduce_kernel__< 128><<< blocks, threads, smemSize >>>(d_idata, d_odata, size); break; case 64: __reduce_kernel__< 64><<< blocks, threads, smemSize >>>(d_idata, d_odata, size); break; } } /* function does fast reduction (sum of elements) of array. result is located in partial_sums[0]. 
IF partial_sums == array then array contents will be destroyed */ int local_reduction( double * array , int size , double * partial_sums , int blocks , int threads ) { //call: local_reduction(gpu_workbuf, nxyz, gpu_workbuf, gpu_blocks, gpu_threads); // routine goes back and forth between the host and device unsigned int new_blocks, current_size; unsigned int lthreads = threads / 2 ; // threads should be power of 2 if ( lthreads < 64 ) lthreads = 64 ; //at least 2*warp_size // First reduction of the array call_reduction_kernel(blocks, lthreads, size, array, partial_sums); // Do iteratively reduction of partial_sums current_size = blocks; while ( current_size > 1 ) { new_blocks = (int)ceil((float)current_size/threads); lthreads = opt_threads( new_blocks , threads , current_size ) / 2 ; if ( lthreads < 64 ) lthreads=64; // at least 2*warp_size call_reduction_kernel( new_blocks , lthreads , current_size , partial_sums , partial_sums ) ; current_size = new_blocks ; } return 0; } int main ( int argc , char ** argv ) { int nxyz=256*256*256; cudaError err; if (argc==2) nxyz = atoi(argv[1]); int gpu_threads=512; int gpu_blocks=(int)ceil((float)nxyz/gpu_threads); printf("GPU SETTING: THREADS=%d, BLOCKS=%d, THREADS*BLOCKS=%d, nxyz=%d\n",gpu_threads,gpu_blocks,gpu_threads*gpu_blocks,nxyz); // Buffers double * workbuf; double * gpu_workbuf; err = cudaHostAlloc( (void **)&workbuf , nxyz*sizeof(double), cudaHostAllocDefault ); if ( err!= cudaSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__ , __FILE__ ) ; return 1; } err = cudaMalloc( (void **)&gpu_workbuf , nxyz*sizeof(double) ); if ( err!= cudaSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__ , __FILE__ ) ; return 1; } // Fill buffer with determnistic numbers double step = 1. ; int i ; for ( i=0; i<nxyz; i++ ) workbuf[i]=step*(i+1); // no cast here err = cudaMemcpy( gpu_workbuf , workbuf , nxyz*sizeof(double) , cudaMemcpyHostToDevice ); if ( err != cudaSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } // gpu reduction double gpu_redu ; double *gpu_reduction_tmp ; err = cudaMalloc( (void **)&gpu_reduction_tmp , gpu_blocks*sizeof(double) ); if ( err != cudaSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } INIT_CUDA_PROFILER; // do reduction START_CUDA_TIMING(0); //local_reduction(gpu_workbuf, nxyz, gpu_workbuf, gpu_blocks, gpu_threads); // routine goes back and forth between the host and device local_reduction(gpu_workbuf, nxyz, gpu_reduction_tmp, gpu_blocks, gpu_threads); // routine goes back and forth between the host and device STOP_CUDA_TIMING(0); // gather timing results double cuda_time , cuda_bandwidth ; GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("\n***Original reduce6 routines ***\n"); printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=cudaMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , cudaMemcpyDeviceToHost ); err=cudaMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , cudaMemcpyDeviceToHost ); if(err!= cudaSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); // Do it all over again for the reduce with shuffle printf("\n***With Shuffling ***\n"); printf("Now testing reduction with warp shuffle\n"); err = cudaMemcpy( gpu_workbuf , workbuf , nxyz*sizeof(double) , 
cudaMemcpyHostToDevice ); if ( err != cudaSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } // do reduction START_CUDA_TIMING(0); deviceReduce(gpu_workbuf, gpu_reduction_tmp, nxyz); STOP_CUDA_TIMING(0); int threads = 512; int blocks = min((nxyz + threads - 1) / threads, 1024); // gather timing results GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=cudaMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , cudaMemcpyDeviceToHost ); err=cudaMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , cudaMemcpyDeviceToHost ); if(err!= cudaSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); // Do it all over again for the reduce with shuffle and vectorization printf("Now testing reduction with warp shuffle and double2 vectorization\n"); err = cudaMemcpy( gpu_workbuf , workbuf , nxyz*sizeof(double) , cudaMemcpyHostToDevice ); if ( err != cudaSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } // do reduction START_CUDA_TIMING(0); deviceReduceVector2(gpu_workbuf, gpu_reduction_tmp, nxyz); STOP_CUDA_TIMING(0); threads = 512; blocks = min((nxyz/2 + threads - 1) / threads, 1024); // gather timing results GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=cudaMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , cudaMemcpyDeviceToHost ); err=cudaMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , cudaMemcpyDeviceToHost ); if(err!= cudaSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); // Do it all over again for the reduce with shuffle and vectorization printf("Now testing reduction with warp shuffle and double4 vectorization\n"); err = cudaMemcpy( gpu_workbuf , workbuf , nxyz*sizeof(double) , cudaMemcpyHostToDevice ); if ( err != cudaSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } // do reduction START_CUDA_TIMING(0); deviceReduceVector4(gpu_workbuf, gpu_reduction_tmp, nxyz); STOP_CUDA_TIMING(0); threads = 512; blocks = min((nxyz/4 + threads - 1) / threads, 1024); // gather timing results GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=cudaMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , cudaMemcpyDeviceToHost ); err=cudaMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , cudaMemcpyDeviceToHost ); if(err!= cudaSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); // Do it all over again for the reduce with shuffle and atomics printf("\n*** With Atomics ***\n"); printf("Now testing reduction with warp shuffle and atomics\n"); err = cudaMemcpy( gpu_workbuf , workbuf , nxyz*sizeof(double) , cudaMemcpyHostToDevice ); if ( err != cudaSuccess 
) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } // do reduction START_CUDA_TIMING(0); deviceReduceWarpAtomic(gpu_workbuf, gpu_reduction_tmp, nxyz); STOP_CUDA_TIMING(0); threads = 512; blocks = min((nxyz/4 + threads - 1) / threads, 1024); // gather timing results GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=cudaMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , cudaMemcpyDeviceToHost ); err=cudaMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , cudaMemcpyDeviceToHost ); if(err!= cudaSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); // Do it all over again for the reduce with shuffle and atomics and vector2 printf("Now testing reduction with warp shuffle and atomics and double2\n"); err = cudaMemcpy( gpu_workbuf , workbuf , nxyz*sizeof(double) , cudaMemcpyHostToDevice ); if ( err != cudaSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } // do reduction START_CUDA_TIMING(0); deviceReduceWarpAtomicVector2(gpu_workbuf, gpu_reduction_tmp, nxyz); STOP_CUDA_TIMING(0); threads = 512; blocks = min((nxyz/4 + threads - 1) / threads, 1024); // gather timing results GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=cudaMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , cudaMemcpyDeviceToHost ); err=cudaMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , cudaMemcpyDeviceToHost ); if(err!= cudaSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); // Do it all over again for the reduce with shuffle and atomics and vector4 printf("Now testing reduction with warp shuffle and atomics and double4\n"); err = cudaMemcpy( gpu_workbuf , workbuf , nxyz*sizeof(double) , cudaMemcpyHostToDevice ); if ( err != cudaSuccess ) { printf("ERROR: (line %d in %s)\n", __LINE__, __FILE__); return 1; } // do reduction START_CUDA_TIMING(0); deviceReduceWarpAtomicVector4(gpu_workbuf, gpu_reduction_tmp, nxyz); STOP_CUDA_TIMING(0); threads = 512; blocks = min((nxyz/4 + threads - 1) / threads, 1024); // gather timing results GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=cudaMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , cudaMemcpyDeviceToHost ); err=cudaMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , cudaMemcpyDeviceToHost ); if(err!= cudaSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); #ifdef USECUB printf("\n*** Now with CUB ***\n"); size_t temp_storage_bytes; double * temp_storage = NULL; cub::DeviceReduce::Reduce(temp_storage, temp_storage_bytes, gpu_workbuf, gpu_reduction_tmp, nxyz, cub::Sum(),0); cudaMalloc(&temp_storage, temp_storage_bytes); cudaDeviceSynchronize(); 
START_CUDA_TIMING(0); cub::DeviceReduce::Reduce(temp_storage, temp_storage_bytes, gpu_workbuf, gpu_reduction_tmp, nxyz, cub::Sum(),0); STOP_CUDA_TIMING(0); threads = 512; blocks = min((nxyz/4 + threads - 1) / threads, 1024); // gather timing results GET_CUDA_TIMING( 0 , cuda_time ) ; GET_CUDA_BANDWIDTH( 0 , nxyz*sizeof(double) , gpu_blocks*sizeof(double) , cuda_bandwidth ) ; printf("CUDA: TIME=%fms, BANDWIDTH=%fGB/s\n", cuda_time, cuda_bandwidth ) ; // copy result and check it //err=cudaMemcpy( &gpu_redu , gpu_workbuf , sizeof(double) , cudaMemcpyDeviceToHost ); err=cudaMemcpy( &gpu_redu , gpu_reduction_tmp , sizeof(double) , cudaMemcpyDeviceToHost ); if(err!= cudaSuccess) { printf("ERROR: (line %d in %s), err=%d\n", __LINE__, __FILE__, err); return 1; } printf("THEORY:\t %.8f\n",0.5*nxyz*(workbuf[0]+workbuf[nxyz-1])); printf("GPU: \t %.8f\n",gpu_redu); #endif DESTROY_CUDA_PROFILER ; return 0 ; }
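Both versions of the benchmark check the GPU result against a closed form: the buffer is filled with workbuf[i] = i + 1, so the exact total is the arithmetic series n(n+1)/2, which is what the printed 0.5*nxyz*(workbuf[0]+workbuf[nxyz-1]) evaluates to. A host-side sketch of the same check follows; the helper name and the tolerance are assumptions, not taken from the benchmark.

/* Hedged sketch: closed-form check for the reduction of 1 + 2 + ... + n.
   The helper name and the 1e-12 tolerance are illustrative assumptions. */
#include <math.h>
#include <stdio.h>

static int check_reduction(double gpu_sum, long n) {
    double expected = 0.5 * (double)n * ((double)n + 1.0);  /* n(n+1)/2 */
    double rel_err = fabs(gpu_sum - expected) / expected;
    printf("expected=%.8f gpu=%.8f rel_err=%e\n", expected, gpu_sum, rel_err);
    return rel_err < 1e-12;  /* non-zero when the result is acceptably close */
}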
17f0879db2e44a27674da792cc5f8da5ae1db171.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "transform.cuh" #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/kernel/operators.cuh> #include <catboost/libs/cuda_wrappers/arch.cuh> #include <contrib/libs/cub/cub/block/block_radix_sort.cuh> namespace NKernel { template <typename T> __global__ void AddVectorImpl(T *x, const T *y, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { const T y0 = __ldg(y + i); const T x0 = __ldg(x + i); const T r0 = y0 + x0; WriteThrough(x + i, r0); i += gridDim.x * blockDim.x; } } template <typename T> void AddVector(T *x, const T *y, ui64 size, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); AddVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size); } template <typename T> __global__ void AddVectorImpl(T *x, const T y, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { const T x0 = __ldg(x + i); const T r0 = y + x0; WriteThrough(x + i, r0); i += gridDim.x * blockDim.x; } } template <typename T> void AddVector(T *x, const T y, ui64 size, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); AddVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size); } template <typename T> __global__ void SubtractVectorImpl(T *x, const T *y, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { const T y0 = __ldg(y + i); const T x0 = __ldg(x + i); const T r0 = x0 - y0; WriteThrough(x + i, r0); i += gridDim.x * blockDim.x; } } template <typename T> __global__ void SubtractVectorImpl(T *x, const T y, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { const T x0 = __ldg(x + i); const T r0 = x0 - y; WriteThrough(x + i, r0); i += gridDim.x * blockDim.x; } } template <typename T> void SubtractVector(T *x, const T *y, ui64 size, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); SubtractVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size); } template <typename T> void SubtractVector(T *x, const T y, ui64 size, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); SubtractVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size); } template <typename T> __global__ void MultiplyVectorImpl(T *x, const T *y, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { const T y0 = __ldg(y + i); const T x0 = __ldg(x + i); const T r0 = y0 * x0; WriteThrough(x + i, r0); i += gridDim.x * blockDim.x; } } template <typename T> void MultiplyVector(T *x, const T *y, ui64 size, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); MultiplyVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size); } template <typename T> __global__ void MultiplyVectorImpl(T *x, const T c, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { T x0 = __ldg(x + i); T r0 = x0 * c; WriteThrough(x + i, r0); i += gridDim.x * blockDim.x; } } template <typename T> void MultiplyVector(T *x, const T c, ui64 size, TCudaStream stream) { 
const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); MultiplyVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, c, size); } template <typename T> __global__ void DivideVectorImpl(T *x, const T *y, bool skipZeroes, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { T x0 = x[i]; T y0 = y[i]; T r0 = ZeroAwareDivide(x0, y0, skipZeroes); x[i] = r0; i += gridDim.x * blockDim.x; } } template <typename T> __global__ void DivideVectorImpl(T *x, const T y, bool skipZeroes, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { T x0 = x[i]; T r0 = ZeroAwareDivide(x0, y, skipZeroes); x[i] = r0; i += gridDim.x * blockDim.x; } } template <typename T> void DivideVector(T *x, const T *y, ui64 size, bool skipZeroes, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); DivideVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, skipZeroes, size); } template <typename T> void DivideVector(T *x, const T y, ui64 size, bool skipZeroes, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); DivideVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, skipZeroes, size); } template <typename T> __global__ void ExpVectorImpl(T *x, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { T val = __ldg(x + i); x[i] = __expf(val); i += gridDim.x * blockDim.x; } } template <typename T> void ExpVector(T *x, ui64 size, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); ExpVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, size); } template <typename T, typename Index> __global__ void GatherImpl(T *dst, const T *src, const Index *map, Index size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize) { Index i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { Index m = __ldg(map + i); for (int column = 0; column < columnCount; ++column) { WriteThrough(dst + i + column * dstColumnAlignSize, StreamLoad(src + m + column * srcColumnAlignSize)); } i += gridDim.x * blockDim.x; } } template <typename T, typename Index> void Gather(T *dst, const T *src, const Index* map, ui64 size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize, TCudaStream stream) { const ui64 blockSize = 256; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); if (numBlocks) { GatherImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, columnCount, dstColumnAlignSize, srcColumnAlignSize); } } template <typename T, typename Index> __global__ void GatherWithMaskImpl(T *dst, const T *src, const Index *map, Index size, Index mask) { Index i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { Index m = StreamLoad(map + i) & mask; WriteThrough(dst + i, StreamLoad(src + m)); i += gridDim.x * blockDim.x; } } template <typename T, typename Index> void GatherWithMask(T *dst, const T *src, const Index* map, ui64 size, Index mask, TCudaStream stream) { const ui64 blockSize = 256; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); if (numBlocks) { GatherWithMaskImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, 
(Index)size, mask); } } template <typename T, typename Index> __global__ void ScatterImpl(T* dst, const T* src, const Index* map, Index size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnALignSize) { Index i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { Index m = __ldg(map + i); for (int column = 0; column < columnCount; ++column) { WriteThrough(dst + m + dstColumnAlignSize * column, StreamLoad(src + i + srcColumnALignSize * column)); } i += gridDim.x * blockDim.x; } } template <typename T, typename Index> void Scatter(T *dst, const T *src, const Index* map, ui64 size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize, TCudaStream stream) { const ui32 blockSize = 256; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); if (numBlocks) { ScatterImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, columnCount, dstColumnAlignSize, srcColumnAlignSize); } } template <typename T, typename Index> __global__ void ScatterWithMaskImpl(T* dst, const T* src, const Index* map, Index size, Index mask) { Index i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { Index m = StreamLoad(map + i) & mask; WriteThrough(dst + m, StreamLoad(src + i)); i += gridDim.x * blockDim.x; } } template <typename T, typename Index> void ScatterWithMask(T *dst, const T *src, const Index* map, ui64 size, Index mask, TCudaStream stream) { const ui32 blockSize = 256; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); if (numBlocks) { ScatterWithMaskImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, mask); } } template <typename T> __global__ void ReverseImpl(T *data, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; ui64 half = size / 2; while (i < half) { T a = data[i]; T b = data[size - i - 1]; data[i] = b; data[size - i - 1] = a; i += gridDim.x * blockDim.x; } } template <typename T> void Reverse(T* data, ui64 size, TCudaStream stream) { const ui32 blockSize = 256; const ui64 numBlocks = min(((size + 1) / 2 + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); ReverseImpl<T> << < numBlocks, blockSize, 0, stream >> > (data, size); } #define BIN_OP_VECTOR_TEMPL(Type) \ template void AddVector<Type>(Type *x, const Type *y, ui64 size, TCudaStream stream);\ template void AddVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream);\ template void SubtractVector<Type>(Type *x, const Type *y, ui64 size, TCudaStream stream);\ template void SubtractVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream); \ template void MultiplyVector<Type>(Type *x, const Type* y, ui64 size, TCudaStream stream);\ template void MultiplyVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream);\ template void DivideVector<Type>(Type *x, const Type* y, ui64 size, bool skipZeroes, TCudaStream stream);\ template void DivideVector<Type>(Type *x, Type y, ui64 size, bool skipZeroes, TCudaStream stream);\ BIN_OP_VECTOR_TEMPL(int) BIN_OP_VECTOR_TEMPL(float) BIN_OP_VECTOR_TEMPL(ui32) BIN_OP_VECTOR_TEMPL(double) BIN_OP_VECTOR_TEMPL(ui8) BIN_OP_VECTOR_TEMPL(uint2) BIN_OP_VECTOR_TEMPL(ui16) #define FUNC_VECTOR_TEMPL(Type) \ template void ExpVector<Type>(Type *x, ui64 size, TCudaStream stream);\ FUNC_VECTOR_TEMPL(float) #define GATHER_SCATTER_TEMPL(Type, IndexType) \ template void Gather<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, int columntCount, ui64, ui64, TCudaStream stream); \ 
template void Scatter<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, int, ui64, ui64, TCudaStream stream); \ template void GatherWithMask<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, IndexType mask, TCudaStream stream); \ template void ScatterWithMask<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, IndexType mask, TCudaStream stream); GATHER_SCATTER_TEMPL(int, ui32) GATHER_SCATTER_TEMPL(ui8, ui32) GATHER_SCATTER_TEMPL(uint2, ui32) GATHER_SCATTER_TEMPL(ui32, ui32) GATHER_SCATTER_TEMPL(float, ui32) GATHER_SCATTER_TEMPL(bool, ui32) #define REVERSE_VECTOR_TEMPL(Type) \ template void Reverse<Type>(Type *x, ui64 size, TCudaStream stream); REVERSE_VECTOR_TEMPL(char) REVERSE_VECTOR_TEMPL(float) REVERSE_VECTOR_TEMPL(unsigned char) REVERSE_VECTOR_TEMPL(short) REVERSE_VECTOR_TEMPL(ui16) REVERSE_VECTOR_TEMPL(int) REVERSE_VECTOR_TEMPL(ui32) // PowVector template <typename T> __global__ void PowVectorImpl(T* const x, const T base, const ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { x[i] = pow(base, x[i]); i += gridDim.x * blockDim.x; } } template <typename T> void PowVector(T* const x, const ui64 size, const T base, const TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = Min( (size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); hipLaunchKernelGGL(( PowVectorImpl<T>), dim3(numBlocks), dim3(blockSize), 0, stream, x, base, size); } #define Y_CATBOOST_CUDA_F_IMPL(T) \ template void PowVector<T>(T* x, ui64 size, T base, TCudaStream stream); Y_MAP_ARGS( Y_CATBOOST_CUDA_F_IMPL, float); #undef Y_CATBOOST_CUDA_F_IMPL // PowVector template <typename T> __global__ void PowVectorImpl(const T* const x, const T base, const ui64 size, T* y) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { y[i] = pow(base, x[i]); i += gridDim.x * blockDim.x; } } template <typename T> void PowVector(const T* x, const ui64 size, const T base, T* y, const TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = Min( (size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); hipLaunchKernelGGL(( PowVectorImpl<T>), dim3(numBlocks), dim3(blockSize), 0, stream, x, base, size, y); } #define Y_CATBOOST_CUDA_F_IMPL(T) \ template void PowVector<T>(const T* x, ui64 size, T base, T* y, TCudaStream stream); Y_MAP_ARGS( Y_CATBOOST_CUDA_F_IMPL, float); #undef Y_CATBOOST_CUDA_F_IMPL }
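This hipified file keeps the triple-chevron launches for most kernels (HIP also accepts that syntax) and rewrites only the PowVector launches into hipLaunchKernelGGL, which is the only difference from the CUDA original that follows. A minimal sketch of the mapping is shown below; the kernel, the launch sizes, and the stream are illustrative assumptions, not taken from the file.

// Hedged sketch of the launch-syntax mapping applied by hipify.
// The kernel and the launch parameters are illustrative assumptions.
#include <cstddef>

__global__ void ScaleKernel(float* x, float c, size_t n) {
    // Grid-stride loop, the same pattern used by the kernels above.
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += (size_t)gridDim.x * blockDim.x) {
        x[i] *= c;
    }
}

// CUDA launch:
//   ScaleKernel<<<numBlocks, 512, 0, stream>>>(x, c, n);
// Equivalent HIP launch emitted by hipify:
//   hipLaunchKernelGGL(ScaleKernel, dim3(numBlocks), dim3(512), 0, stream, x, c, n);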
17f0879db2e44a27674da792cc5f8da5ae1db171.cu
#include "transform.cuh" #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/kernel/operators.cuh> #include <catboost/libs/cuda_wrappers/arch.cuh> #include <contrib/libs/cub/cub/block/block_radix_sort.cuh> namespace NKernel { template <typename T> __global__ void AddVectorImpl(T *x, const T *y, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { const T y0 = __ldg(y + i); const T x0 = __ldg(x + i); const T r0 = y0 + x0; WriteThrough(x + i, r0); i += gridDim.x * blockDim.x; } } template <typename T> void AddVector(T *x, const T *y, ui64 size, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); AddVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size); } template <typename T> __global__ void AddVectorImpl(T *x, const T y, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { const T x0 = __ldg(x + i); const T r0 = y + x0; WriteThrough(x + i, r0); i += gridDim.x * blockDim.x; } } template <typename T> void AddVector(T *x, const T y, ui64 size, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); AddVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size); } template <typename T> __global__ void SubtractVectorImpl(T *x, const T *y, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { const T y0 = __ldg(y + i); const T x0 = __ldg(x + i); const T r0 = x0 - y0; WriteThrough(x + i, r0); i += gridDim.x * blockDim.x; } } template <typename T> __global__ void SubtractVectorImpl(T *x, const T y, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { const T x0 = __ldg(x + i); const T r0 = x0 - y; WriteThrough(x + i, r0); i += gridDim.x * blockDim.x; } } template <typename T> void SubtractVector(T *x, const T *y, ui64 size, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); SubtractVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size); } template <typename T> void SubtractVector(T *x, const T y, ui64 size, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); SubtractVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size); } template <typename T> __global__ void MultiplyVectorImpl(T *x, const T *y, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { const T y0 = __ldg(y + i); const T x0 = __ldg(x + i); const T r0 = y0 * x0; WriteThrough(x + i, r0); i += gridDim.x * blockDim.x; } } template <typename T> void MultiplyVector(T *x, const T *y, ui64 size, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); MultiplyVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size); } template <typename T> __global__ void MultiplyVectorImpl(T *x, const T c, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { T x0 = __ldg(x + i); T r0 = x0 * c; WriteThrough(x + i, r0); i += gridDim.x * blockDim.x; } } template <typename T> void MultiplyVector(T *x, const T c, ui64 size, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, 
(ui64)TArchProps::MaxBlockCount()); MultiplyVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, c, size); } template <typename T> __global__ void DivideVectorImpl(T *x, const T *y, bool skipZeroes, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { T x0 = x[i]; T y0 = y[i]; T r0 = ZeroAwareDivide(x0, y0, skipZeroes); x[i] = r0; i += gridDim.x * blockDim.x; } } template <typename T> __global__ void DivideVectorImpl(T *x, const T y, bool skipZeroes, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { T x0 = x[i]; T r0 = ZeroAwareDivide(x0, y, skipZeroes); x[i] = r0; i += gridDim.x * blockDim.x; } } template <typename T> void DivideVector(T *x, const T *y, ui64 size, bool skipZeroes, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); DivideVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, skipZeroes, size); } template <typename T> void DivideVector(T *x, const T y, ui64 size, bool skipZeroes, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); DivideVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, skipZeroes, size); } template <typename T> __global__ void ExpVectorImpl(T *x, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { T val = __ldg(x + i); x[i] = __expf(val); i += gridDim.x * blockDim.x; } } template <typename T> void ExpVector(T *x, ui64 size, TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); ExpVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, size); } template <typename T, typename Index> __global__ void GatherImpl(T *dst, const T *src, const Index *map, Index size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize) { Index i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { Index m = __ldg(map + i); for (int column = 0; column < columnCount; ++column) { WriteThrough(dst + i + column * dstColumnAlignSize, StreamLoad(src + m + column * srcColumnAlignSize)); } i += gridDim.x * blockDim.x; } } template <typename T, typename Index> void Gather(T *dst, const T *src, const Index* map, ui64 size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize, TCudaStream stream) { const ui64 blockSize = 256; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); if (numBlocks) { GatherImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, columnCount, dstColumnAlignSize, srcColumnAlignSize); } } template <typename T, typename Index> __global__ void GatherWithMaskImpl(T *dst, const T *src, const Index *map, Index size, Index mask) { Index i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { Index m = StreamLoad(map + i) & mask; WriteThrough(dst + i, StreamLoad(src + m)); i += gridDim.x * blockDim.x; } } template <typename T, typename Index> void GatherWithMask(T *dst, const T *src, const Index* map, ui64 size, Index mask, TCudaStream stream) { const ui64 blockSize = 256; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); if (numBlocks) { GatherWithMaskImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, mask); } } template <typename T, typename Index> __global__ void ScatterImpl(T* 
dst, const T* src, const Index* map, Index size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnALignSize) { Index i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { Index m = __ldg(map + i); for (int column = 0; column < columnCount; ++column) { WriteThrough(dst + m + dstColumnAlignSize * column, StreamLoad(src + i + srcColumnALignSize * column)); } i += gridDim.x * blockDim.x; } } template <typename T, typename Index> void Scatter(T *dst, const T *src, const Index* map, ui64 size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize, TCudaStream stream) { const ui32 blockSize = 256; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); if (numBlocks) { ScatterImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, columnCount, dstColumnAlignSize, srcColumnAlignSize); } } template <typename T, typename Index> __global__ void ScatterWithMaskImpl(T* dst, const T* src, const Index* map, Index size, Index mask) { Index i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { Index m = StreamLoad(map + i) & mask; WriteThrough(dst + m, StreamLoad(src + i)); i += gridDim.x * blockDim.x; } } template <typename T, typename Index> void ScatterWithMask(T *dst, const T *src, const Index* map, ui64 size, Index mask, TCudaStream stream) { const ui32 blockSize = 256; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); if (numBlocks) { ScatterWithMaskImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, mask); } } template <typename T> __global__ void ReverseImpl(T *data, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; ui64 half = size / 2; while (i < half) { T a = data[i]; T b = data[size - i - 1]; data[i] = b; data[size - i - 1] = a; i += gridDim.x * blockDim.x; } } template <typename T> void Reverse(T* data, ui64 size, TCudaStream stream) { const ui32 blockSize = 256; const ui64 numBlocks = min(((size + 1) / 2 + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); ReverseImpl<T> << < numBlocks, blockSize, 0, stream >> > (data, size); } #define BIN_OP_VECTOR_TEMPL(Type) \ template void AddVector<Type>(Type *x, const Type *y, ui64 size, TCudaStream stream);\ template void AddVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream);\ template void SubtractVector<Type>(Type *x, const Type *y, ui64 size, TCudaStream stream);\ template void SubtractVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream); \ template void MultiplyVector<Type>(Type *x, const Type* y, ui64 size, TCudaStream stream);\ template void MultiplyVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream);\ template void DivideVector<Type>(Type *x, const Type* y, ui64 size, bool skipZeroes, TCudaStream stream);\ template void DivideVector<Type>(Type *x, Type y, ui64 size, bool skipZeroes, TCudaStream stream);\ BIN_OP_VECTOR_TEMPL(int) BIN_OP_VECTOR_TEMPL(float) BIN_OP_VECTOR_TEMPL(ui32) BIN_OP_VECTOR_TEMPL(double) BIN_OP_VECTOR_TEMPL(ui8) BIN_OP_VECTOR_TEMPL(uint2) BIN_OP_VECTOR_TEMPL(ui16) #define FUNC_VECTOR_TEMPL(Type) \ template void ExpVector<Type>(Type *x, ui64 size, TCudaStream stream);\ FUNC_VECTOR_TEMPL(float) #define GATHER_SCATTER_TEMPL(Type, IndexType) \ template void Gather<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, int columntCount, ui64, ui64, TCudaStream stream); \ template void Scatter<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, 
int, ui64, ui64, TCudaStream stream); \ template void GatherWithMask<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, IndexType mask, TCudaStream stream); \ template void ScatterWithMask<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, IndexType mask, TCudaStream stream); GATHER_SCATTER_TEMPL(int, ui32) GATHER_SCATTER_TEMPL(ui8, ui32) GATHER_SCATTER_TEMPL(uint2, ui32) GATHER_SCATTER_TEMPL(ui32, ui32) GATHER_SCATTER_TEMPL(float, ui32) GATHER_SCATTER_TEMPL(bool, ui32) #define REVERSE_VECTOR_TEMPL(Type) \ template void Reverse<Type>(Type *x, ui64 size, TCudaStream stream); REVERSE_VECTOR_TEMPL(char) REVERSE_VECTOR_TEMPL(float) REVERSE_VECTOR_TEMPL(unsigned char) REVERSE_VECTOR_TEMPL(short) REVERSE_VECTOR_TEMPL(ui16) REVERSE_VECTOR_TEMPL(int) REVERSE_VECTOR_TEMPL(ui32) // PowVector template <typename T> __global__ void PowVectorImpl(T* const x, const T base, const ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { x[i] = pow(base, x[i]); i += gridDim.x * blockDim.x; } } template <typename T> void PowVector(T* const x, const ui64 size, const T base, const TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = Min( (size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); PowVectorImpl<T><<<numBlocks, blockSize, 0, stream>>>(x, base, size); } #define Y_CATBOOST_CUDA_F_IMPL(T) \ template void PowVector<T>(T* x, ui64 size, T base, TCudaStream stream); Y_MAP_ARGS( Y_CATBOOST_CUDA_F_IMPL, float); #undef Y_CATBOOST_CUDA_F_IMPL // PowVector template <typename T> __global__ void PowVectorImpl(const T* const x, const T base, const ui64 size, T* y) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { y[i] = pow(base, x[i]); i += gridDim.x * blockDim.x; } } template <typename T> void PowVector(const T* x, const ui64 size, const T base, T* y, const TCudaStream stream) { const ui32 blockSize = 512; const ui64 numBlocks = Min( (size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); PowVectorImpl<T><<<numBlocks, blockSize, 0, stream>>>(x, base, size, y); } #define Y_CATBOOST_CUDA_F_IMPL(T) \ template void PowVector<T>(const T* x, ui64 size, T base, T* y, TCudaStream stream); Y_MAP_ARGS( Y_CATBOOST_CUDA_F_IMPL, float); #undef Y_CATBOOST_CUDA_F_IMPL }
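The gather/scatter kernels in both versions follow one convention: Gather reads through the map (dst[i] = src[map[i]]), Scatter writes through it (dst[map[i]] = src[i]), and the WithMask variants strip flag bits from each index with a bitwise AND before using it. A CPU reference of the masked semantics is sketched below; the simplified types and function names are assumptions, not part of the library.

#include <cstddef>
#include <cstdint>

// Hedged sketch: host-side reference of the GatherWithMask / ScatterWithMask
// semantics implemented by the kernels above. uint32_t stands in for the
// library's ui32; the mask removes flag bits packed into the high bits of
// each index (e.g. mask = 0x3FFFFFFFu keeps the low 30 bits).
template <typename T>
void GatherWithMaskRef(T* dst, const T* src, const uint32_t* map,
                       size_t size, uint32_t mask) {
    for (size_t i = 0; i < size; ++i) {
        dst[i] = src[map[i] & mask];  // read through the masked index
    }
}

template <typename T>
void ScatterWithMaskRef(T* dst, const T* src, const uint32_t* map,
                        size_t size, uint32_t mask) {
    for (size_t i = 0; i < size; ++i) {
        dst[map[i] & mask] = src[i];  // write through the masked index
    }
}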
57c521a8e67e61b843c1d485ce3e3e36cf7dcf45.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/flip_op.h" #include <vector> #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/platform/complex.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using CUDADeviceContext = paddle::platform::CUDADeviceContext; template <typename T> __global__ void flip_cuda_kernel(const int N, const T* in_data, T* out_data, int64_t* x_shape, int64_t* x_stride, int* flip_dims, int flip_dims_size, int total_dims) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) { return; } int cur_indices = idx, rem = 0, dst_offset = 0; for (int i = 0; i < total_dims; ++i) { int64_t temp = cur_indices; cur_indices = cur_indices / x_stride[i]; rem = temp - cur_indices * x_stride[i]; // flip the indices if it is in flip_dims for (int j = 0; j < flip_dims_size; ++j) { if (i == flip_dims[j]) { cur_indices = x_shape[i] - 1 - cur_indices; } } dst_offset += cur_indices * x_stride[i]; cur_indices = rem; } out_data[idx] = in_data[dst_offset]; } template <typename T> class FlipKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace()); auto cplace = platform::CPUPlace(); auto& dev_ctx = ctx.template device_context<CUDADeviceContext>(); const Tensor* x = ctx.Input<Tensor>("X"); Tensor* out = ctx.Output<Tensor>("Out"); auto* in_data = x->data<T>(); auto* out_data = out->mutable_data<T>(ctx.GetPlace()); auto flip_dims = ctx.template Attr<std::vector<int>>("axis"); const int flip_dims_size = static_cast<int>(flip_dims.size()); auto x_dims = x->dims(); const int total_dims = x_dims.size(); const int N = x->numel(); int block_size = 512; dim3 dim_block(block_size); dim3 dim_grid((N + block_size - 1) / block_size); for (size_t i = 0; i < flip_dims.size(); ++i) { if (flip_dims[i] < 0) { flip_dims[i] += total_dims; } } auto x_stride = framework::stride(x_dims); std::vector<int64_t> x_dims_v = framework::vectorize(x_dims); std::vector<int64_t> x_stride_v = framework::vectorize(x_stride); int bytes = total_dims * sizeof(int64_t); auto x_strides_array_tmp = memory::Alloc(dev_ctx, bytes); int64_t* x_strides_array_gpu = reinterpret_cast<int64_t*>(x_strides_array_tmp->ptr()); memory::Copy(gplace, x_strides_array_gpu, cplace, x_stride_v.data(), bytes, dev_ctx.stream()); auto x_shape_array_tmp = memory::Alloc(dev_ctx, bytes); int64_t* x_shape_array_gpu = reinterpret_cast<int64_t*>(x_shape_array_tmp->ptr()); memory::Copy(gplace, x_shape_array_gpu, cplace, x_dims_v.data(), bytes, dev_ctx.stream()); bytes = flip_dims_size * sizeof(int); auto flip_dims_array_tmp = memory::Alloc(dev_ctx, bytes); int* flip_dims_array_gpu = reinterpret_cast<int*>(flip_dims_array_tmp->ptr()); memory::Copy(gplace, flip_dims_array_gpu, cplace, flip_dims.data(), bytes, 
dev_ctx.stream()); hipLaunchKernelGGL(( flip_cuda_kernel< T>), dim3(dim_grid), dim3(dim_block), 0, ctx.cuda_device_context().stream(), N, in_data, out_data, x_shape_array_gpu, x_strides_array_gpu, flip_dims_array_gpu, flip_dims_size, total_dims); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( flip, ops::FlipKernel<paddle::platform::CUDADeviceContext, float>, ops::FlipKernel<paddle::platform::CUDADeviceContext, double>, ops::FlipKernel<paddle::platform::CUDADeviceContext, plat::float16>, ops::FlipKernel<paddle::platform::CUDADeviceContext, int>, ops::FlipKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::FlipKernel<paddle::platform::CUDADeviceContext, bool>, ops::FlipKernel<paddle::platform::CUDADeviceContext, plat::complex<float>>, ops::FlipKernel<paddle::platform::CUDADeviceContext, plat::complex<double>>);
57c521a8e67e61b843c1d485ce3e3e36cf7dcf45.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/flip_op.h" #include <vector> #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/platform/complex.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using CUDADeviceContext = paddle::platform::CUDADeviceContext; template <typename T> __global__ void flip_cuda_kernel(const int N, const T* in_data, T* out_data, int64_t* x_shape, int64_t* x_stride, int* flip_dims, int flip_dims_size, int total_dims) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) { return; } int cur_indices = idx, rem = 0, dst_offset = 0; for (int i = 0; i < total_dims; ++i) { int64_t temp = cur_indices; cur_indices = cur_indices / x_stride[i]; rem = temp - cur_indices * x_stride[i]; // flip the indices if it is in flip_dims for (int j = 0; j < flip_dims_size; ++j) { if (i == flip_dims[j]) { cur_indices = x_shape[i] - 1 - cur_indices; } } dst_offset += cur_indices * x_stride[i]; cur_indices = rem; } out_data[idx] = in_data[dst_offset]; } template <typename T> class FlipKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace()); auto cplace = platform::CPUPlace(); auto& dev_ctx = ctx.template device_context<CUDADeviceContext>(); const Tensor* x = ctx.Input<Tensor>("X"); Tensor* out = ctx.Output<Tensor>("Out"); auto* in_data = x->data<T>(); auto* out_data = out->mutable_data<T>(ctx.GetPlace()); auto flip_dims = ctx.template Attr<std::vector<int>>("axis"); const int flip_dims_size = static_cast<int>(flip_dims.size()); auto x_dims = x->dims(); const int total_dims = x_dims.size(); const int N = x->numel(); int block_size = 512; dim3 dim_block(block_size); dim3 dim_grid((N + block_size - 1) / block_size); for (size_t i = 0; i < flip_dims.size(); ++i) { if (flip_dims[i] < 0) { flip_dims[i] += total_dims; } } auto x_stride = framework::stride(x_dims); std::vector<int64_t> x_dims_v = framework::vectorize(x_dims); std::vector<int64_t> x_stride_v = framework::vectorize(x_stride); int bytes = total_dims * sizeof(int64_t); auto x_strides_array_tmp = memory::Alloc(dev_ctx, bytes); int64_t* x_strides_array_gpu = reinterpret_cast<int64_t*>(x_strides_array_tmp->ptr()); memory::Copy(gplace, x_strides_array_gpu, cplace, x_stride_v.data(), bytes, dev_ctx.stream()); auto x_shape_array_tmp = memory::Alloc(dev_ctx, bytes); int64_t* x_shape_array_gpu = reinterpret_cast<int64_t*>(x_shape_array_tmp->ptr()); memory::Copy(gplace, x_shape_array_gpu, cplace, x_dims_v.data(), bytes, dev_ctx.stream()); bytes = flip_dims_size * sizeof(int); auto flip_dims_array_tmp = memory::Alloc(dev_ctx, bytes); int* flip_dims_array_gpu = reinterpret_cast<int*>(flip_dims_array_tmp->ptr()); memory::Copy(gplace, flip_dims_array_gpu, cplace, flip_dims.data(), bytes, dev_ctx.stream()); flip_cuda_kernel< T><<<dim_grid, dim_block, 0, 
ctx.cuda_device_context().stream()>>>( N, in_data, out_data, x_shape_array_gpu, x_strides_array_gpu, flip_dims_array_gpu, flip_dims_size, total_dims); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( flip, ops::FlipKernel<paddle::platform::CUDADeviceContext, float>, ops::FlipKernel<paddle::platform::CUDADeviceContext, double>, ops::FlipKernel<paddle::platform::CUDADeviceContext, plat::float16>, ops::FlipKernel<paddle::platform::CUDADeviceContext, int>, ops::FlipKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::FlipKernel<paddle::platform::CUDADeviceContext, bool>, ops::FlipKernel<paddle::platform::CUDADeviceContext, plat::complex<float>>, ops::FlipKernel<paddle::platform::CUDADeviceContext, plat::complex<double>>);
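The flip kernel in both versions of this pair computes each output element's source offset by peeling the linear index apart with the row-major strides and mirroring the coordinate on every flipped axis. As a sanity check, the same arithmetic can be run on the host; the short, self-contained C++ sketch below is an illustration only (not part of either file, with example shape values chosen here): it flips a 2x3 row-major array along axis 1 and prints 2 1 0 5 4 3.

#include <cstdio>

int main()
{
    // Shapes and strides for a 2x3 row-major array, flipping axis 1 (example values, not from the file).
    const int total_dims = 2;
    const int x_shape[]  = {2, 3};
    const int x_stride[] = {3, 1};
    const int flip_dims[] = {1};
    const int flip_dims_size = 1;
    const int in[6] = {0, 1, 2, 3, 4, 5};
    int out[6];

    for (int idx = 0; idx < 6; ++idx) {
        // Same index decomposition as flip_cuda_kernel, just on the CPU.
        int cur = idx, rem = 0, dst_offset = 0;
        for (int i = 0; i < total_dims; ++i) {
            int temp = cur;
            cur = cur / x_stride[i];
            rem = temp - cur * x_stride[i];
            for (int j = 0; j < flip_dims_size; ++j) {
                if (i == flip_dims[j]) {
                    cur = x_shape[i] - 1 - cur;   // mirror the coordinate on a flipped axis
                }
            }
            dst_offset += cur * x_stride[i];
            cur = rem;
        }
        out[idx] = in[dst_offset];
    }

    for (int i = 0; i < 6; ++i) {
        printf("%d ", out[i]);                    // prints: 2 1 0 5 4 3
    }
    printf("\n");
    return 0;
}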
f420cf9172d1595bdecdb868946586bc71d24a8f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unittest/unittest.h> #include <thrust/partition.h> #include <thrust/count.h> #include <thrust/execution_policy.h> template<typename ExecutionPolicy, typename Iterator1, typename Predicate, typename Iterator2> __global__ void partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Predicate pred, Iterator2 result) { *result = thrust::partition(exec, first, last, pred); } template<typename T> struct is_even { __host__ __device__ bool operator()(T x) const { return ((int) x % 2) == 0; } }; template<typename ExecutionPolicy> void TestPartitionDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<iterator> result(1); hipLaunchKernelGGL(( partition_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), is_even<T>(), result.begin()); hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); thrust::device_vector<T> ref(5); ref[0] = 2; ref[1] = 2; ref[2] = 1; ref[3] = 1; ref[4] = 1; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } void TestPartitionDeviceSeq() { TestPartitionDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionDeviceSeq); void TestPartitionDeviceDevice() { TestPartitionDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionDeviceDevice); void TestPartitionDeviceNoSync() { TestPartitionDevice(thrust::hip::par_nosync); } DECLARE_UNITTEST(TestPartitionDeviceNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Predicate, typename Iterator3> __global__ void partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Predicate pred, Iterator3 result) { *result = thrust::partition(exec, first, last, stencil_first, pred); } template<typename ExecutionPolicy> void TestPartitionStencilDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<T> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<iterator> result(1); hipLaunchKernelGGL(( partition_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), stencil.begin(), is_even<T>(), result.begin()); hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); thrust::device_vector<T> ref(5); ref[0] = 1; ref[1] = 1; ref[2] = 0; ref[3] = 0; ref[4] = 0; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } void TestPartitionStencilDeviceSeq() { TestPartitionStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionStencilDeviceSeq); void TestPartitionStencilDeviceDevice() { TestPartitionStencilDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionStencilDeviceDevice); void TestPartitionStencilDeviceNoSync() { TestPartitionStencilDevice(thrust::hip::par_nosync); } DECLARE_UNITTEST(TestPartitionStencilDeviceNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Predicate, typename Iterator4> __global__ void partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 true_result, Iterator3 false_result, Predicate pred, Iterator4 result) { *result = 
thrust::partition_copy(exec, first, last, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestPartitionCopyDevice(ExecutionPolicy exec) { typedef int T; typedef thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); hipLaunchKernelGGL(( partition_copy_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); thrust::device_vector<T> true_ref(2); true_ref[0] = 2; true_ref[1] = 2; thrust::device_vector<T> false_ref(3); false_ref[0] = 1; false_ref[1] = 1; false_ref[2] = 1; pair_type ends = iterators[0]; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestPartitionCopyDeviceSeq() { TestPartitionCopyDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionCopyDeviceSeq); void TestPartitionCopyDeviceDevice() { TestPartitionCopyDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionCopyDeviceDevice); void TestPartitionCopyDeviceNoSync() { TestPartitionCopyDevice(thrust::hip::par_nosync); } DECLARE_UNITTEST(TestPartitionCopyDeviceNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Predicate, typename Iterator5> __global__ void partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Iterator3 true_result, Iterator4 false_result, Predicate pred, Iterator5 result) { *result = thrust::partition_copy(exec, first, last, stencil_first, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestPartitionCopyStencilDevice(ExecutionPolicy exec) { typedef int T; thrust::device_vector<int> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<int> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef typename thrust::device_vector<int>::iterator iterator; typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); hipLaunchKernelGGL(( partition_copy_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), stencil.begin(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); pair_type ends = iterators[0]; thrust::device_vector<int> true_ref(2); true_ref[0] = 1; true_ref[1] = 1; thrust::device_vector<int> false_ref(3); false_ref[0] = 0; false_ref[1] = 0; false_ref[2] = 0; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestPartitionCopyStencilDeviceSeq() { TestPartitionCopyStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionCopyStencilDeviceSeq); void TestPartitionCopyStencilDeviceDevice() { TestPartitionCopyStencilDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionCopyStencilDeviceDevice); void 
TestPartitionCopyStencilDeviceNoSync() { TestPartitionCopyStencilDevice(thrust::hip::par_nosync); } DECLARE_UNITTEST(TestPartitionCopyStencilDeviceNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Predicate, typename Iterator2, typename Iterator3> __global__ void stable_partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Predicate pred, Iterator2 result, Iterator3 is_supported) { #if (__CUDA_ARCH__ >= 200) *is_supported = true; *result = thrust::stable_partition(exec, first, last, pred); #else *is_supported = false; #endif } template<typename ExecutionPolicy> void TestStablePartitionDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<iterator> result(1); thrust::device_vector<bool> is_supported(1); hipLaunchKernelGGL(( stable_partition_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), is_even<T>(), result.begin(), is_supported.begin()); hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); if(is_supported[0]) { thrust::device_vector<T> ref(5); ref[0] = 2; ref[1] = 2; ref[2] = 1; ref[3] = 1; ref[4] = 1; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } } void TestStablePartitionDeviceSeq() { TestStablePartitionDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionDeviceSeq); void TestStablePartitionDeviceDevice() { TestStablePartitionDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionDeviceDevice); void TestStablePartitionDeviceNoSync() { TestStablePartitionDevice(thrust::hip::par_nosync); } DECLARE_UNITTEST(TestStablePartitionDeviceNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Predicate, typename Iterator3, typename Iterator4> __global__ void stable_partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Predicate pred, Iterator3 result, Iterator4 is_supported) { #if (__CUDA_ARCH__ >= 200) *is_supported = true; *result = thrust::stable_partition(exec, first, last, stencil_first, pred); #else *is_supported = false; #endif } template<typename ExecutionPolicy> void TestStablePartitionStencilDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<T> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<iterator> result(1); thrust::device_vector<bool> is_supported(1); hipLaunchKernelGGL(( stable_partition_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), stencil.begin(), is_even<T>(), result.begin(), is_supported.begin()); hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); if(is_supported[0]) { thrust::device_vector<T> ref(5); ref[0] = 1; ref[1] = 1; ref[2] = 0; ref[3] = 0; ref[4] = 0; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } } void TestStablePartitionStencilDeviceSeq() { TestStablePartitionStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionStencilDeviceSeq); void TestStablePartitionStencilDeviceDevice() { TestStablePartitionStencilDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionStencilDeviceDevice); void TestStablePartitionStencilDeviceNoSync() { 
TestStablePartitionStencilDevice(thrust::hip::par_nosync); } DECLARE_UNITTEST(TestStablePartitionStencilDeviceNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Predicate, typename Iterator4> __global__ void stable_partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 true_result, Iterator3 false_result, Predicate pred, Iterator4 result) { *result = thrust::stable_partition_copy(exec, first, last, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestStablePartitionCopyDevice(ExecutionPolicy exec) { typedef int T; typedef thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); hipLaunchKernelGGL(( stable_partition_copy_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); thrust::device_vector<T> true_ref(2); true_ref[0] = 2; true_ref[1] = 2; thrust::device_vector<T> false_ref(3); false_ref[0] = 1; false_ref[1] = 1; false_ref[2] = 1; pair_type ends = iterators[0]; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestStablePartitionCopyDeviceSeq() { TestStablePartitionCopyDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionCopyDeviceSeq); void TestStablePartitionCopyDeviceDevice() { TestStablePartitionCopyDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionCopyDeviceDevice); void TestStablePartitionCopyDeviceNoSync() { TestStablePartitionCopyDevice(thrust::hip::par_nosync); } DECLARE_UNITTEST(TestStablePartitionCopyDeviceNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Predicate, typename Iterator5> __global__ void stable_partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Iterator3 true_result, Iterator4 false_result, Predicate pred, Iterator5 result) { *result = thrust::stable_partition_copy(exec, first, last, stencil_first, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestStablePartitionCopyStencilDevice(ExecutionPolicy exec) { typedef int T; thrust::device_vector<int> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<int> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef typename thrust::device_vector<int>::iterator iterator; typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); hipLaunchKernelGGL(( stable_partition_copy_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), stencil.begin(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); pair_type ends = iterators[0]; thrust::device_vector<int> true_ref(2); true_ref[0] = 1; true_ref[1] = 1; thrust::device_vector<int> 
false_ref(3); false_ref[0] = 0; false_ref[1] = 0; false_ref[2] = 0; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestStablePartitionCopyStencilDeviceSeq() { TestStablePartitionCopyStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionCopyStencilDeviceSeq); void TestStablePartitionCopyStencilDeviceDevice() { TestStablePartitionCopyStencilDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionCopyStencilDeviceDevice); void TestStablePartitionCopyStencilDeviceNoSync() { TestStablePartitionCopyStencilDevice(thrust::hip::par_nosync); } DECLARE_UNITTEST(TestStablePartitionCopyStencilDeviceNoSync); template<typename ExecutionPolicy> void TestPartitionCudaStreams(ExecutionPolicy policy) { typedef thrust::device_vector<int> Vector; typedef Vector::value_type T; typedef Vector::iterator Iterator; Vector data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; hipStream_t s; hipStreamCreate(&s); auto streampolicy = policy.on(s); Iterator iter = thrust::partition(streampolicy, data.begin(), data.end(), is_even<T>()); Vector ref(5); ref[0] = 2; ref[1] = 2; ref[2] = 1; ref[3] = 1; ref[4] = 1; ASSERT_EQUAL(iter - data.begin(), 2); ASSERT_EQUAL(data, ref); hipStreamDestroy(s); } void TestPartitionCudaStreamsSync() { TestPartitionCudaStreams(thrust::hip::par); } DECLARE_UNITTEST(TestPartitionCudaStreamsSync); void TestPartitionCudaStreamsNoSync() { TestPartitionCudaStreams(thrust::hip::par_nosync); } DECLARE_UNITTEST(TestPartitionCudaStreamsNoSync);
f420cf9172d1595bdecdb868946586bc71d24a8f.cu
#include <unittest/unittest.h> #include <thrust/partition.h> #include <thrust/count.h> #include <thrust/execution_policy.h> template<typename ExecutionPolicy, typename Iterator1, typename Predicate, typename Iterator2> __global__ void partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Predicate pred, Iterator2 result) { *result = thrust::partition(exec, first, last, pred); } template<typename T> struct is_even { __host__ __device__ bool operator()(T x) const { return ((int) x % 2) == 0; } }; template<typename ExecutionPolicy> void TestPartitionDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<iterator> result(1); partition_kernel<<<1,1>>>(exec, data.begin(), data.end(), is_even<T>(), result.begin()); cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); thrust::device_vector<T> ref(5); ref[0] = 2; ref[1] = 2; ref[2] = 1; ref[3] = 1; ref[4] = 1; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } void TestPartitionDeviceSeq() { TestPartitionDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionDeviceSeq); void TestPartitionDeviceDevice() { TestPartitionDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionDeviceDevice); void TestPartitionDeviceNoSync() { TestPartitionDevice(thrust::cuda::par_nosync); } DECLARE_UNITTEST(TestPartitionDeviceNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Predicate, typename Iterator3> __global__ void partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Predicate pred, Iterator3 result) { *result = thrust::partition(exec, first, last, stencil_first, pred); } template<typename ExecutionPolicy> void TestPartitionStencilDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<T> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<iterator> result(1); partition_kernel<<<1,1>>>(exec, data.begin(), data.end(), stencil.begin(), is_even<T>(), result.begin()); cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); thrust::device_vector<T> ref(5); ref[0] = 1; ref[1] = 1; ref[2] = 0; ref[3] = 0; ref[4] = 0; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } void TestPartitionStencilDeviceSeq() { TestPartitionStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionStencilDeviceSeq); void TestPartitionStencilDeviceDevice() { TestPartitionStencilDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionStencilDeviceDevice); void TestPartitionStencilDeviceNoSync() { TestPartitionStencilDevice(thrust::cuda::par_nosync); } DECLARE_UNITTEST(TestPartitionStencilDeviceNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Predicate, typename Iterator4> __global__ void partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 true_result, Iterator3 false_result, Predicate pred, Iterator4 result) { *result = thrust::partition_copy(exec, first, last, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestPartitionCopyDevice(ExecutionPolicy exec) { 
typedef int T; typedef thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); partition_copy_kernel<<<1,1>>>(exec, data.begin(), data.end(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); thrust::device_vector<T> true_ref(2); true_ref[0] = 2; true_ref[1] = 2; thrust::device_vector<T> false_ref(3); false_ref[0] = 1; false_ref[1] = 1; false_ref[2] = 1; pair_type ends = iterators[0]; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestPartitionCopyDeviceSeq() { TestPartitionCopyDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionCopyDeviceSeq); void TestPartitionCopyDeviceDevice() { TestPartitionCopyDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionCopyDeviceDevice); void TestPartitionCopyDeviceNoSync() { TestPartitionCopyDevice(thrust::cuda::par_nosync); } DECLARE_UNITTEST(TestPartitionCopyDeviceNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Predicate, typename Iterator5> __global__ void partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Iterator3 true_result, Iterator4 false_result, Predicate pred, Iterator5 result) { *result = thrust::partition_copy(exec, first, last, stencil_first, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestPartitionCopyStencilDevice(ExecutionPolicy exec) { typedef int T; thrust::device_vector<int> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<int> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef typename thrust::device_vector<int>::iterator iterator; typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); partition_copy_kernel<<<1,1>>>(exec, data.begin(), data.end(), stencil.begin(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); pair_type ends = iterators[0]; thrust::device_vector<int> true_ref(2); true_ref[0] = 1; true_ref[1] = 1; thrust::device_vector<int> false_ref(3); false_ref[0] = 0; false_ref[1] = 0; false_ref[2] = 0; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestPartitionCopyStencilDeviceSeq() { TestPartitionCopyStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionCopyStencilDeviceSeq); void TestPartitionCopyStencilDeviceDevice() { TestPartitionCopyStencilDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionCopyStencilDeviceDevice); void TestPartitionCopyStencilDeviceNoSync() { TestPartitionCopyStencilDevice(thrust::cuda::par_nosync); } DECLARE_UNITTEST(TestPartitionCopyStencilDeviceNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Predicate, 
typename Iterator2, typename Iterator3> __global__ void stable_partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Predicate pred, Iterator2 result, Iterator3 is_supported) { #if (__CUDA_ARCH__ >= 200) *is_supported = true; *result = thrust::stable_partition(exec, first, last, pred); #else *is_supported = false; #endif } template<typename ExecutionPolicy> void TestStablePartitionDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<iterator> result(1); thrust::device_vector<bool> is_supported(1); stable_partition_kernel<<<1,1>>>(exec, data.begin(), data.end(), is_even<T>(), result.begin(), is_supported.begin()); cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); if(is_supported[0]) { thrust::device_vector<T> ref(5); ref[0] = 2; ref[1] = 2; ref[2] = 1; ref[3] = 1; ref[4] = 1; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } } void TestStablePartitionDeviceSeq() { TestStablePartitionDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionDeviceSeq); void TestStablePartitionDeviceDevice() { TestStablePartitionDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionDeviceDevice); void TestStablePartitionDeviceNoSync() { TestStablePartitionDevice(thrust::cuda::par_nosync); } DECLARE_UNITTEST(TestStablePartitionDeviceNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Predicate, typename Iterator3, typename Iterator4> __global__ void stable_partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Predicate pred, Iterator3 result, Iterator4 is_supported) { #if (__CUDA_ARCH__ >= 200) *is_supported = true; *result = thrust::stable_partition(exec, first, last, stencil_first, pred); #else *is_supported = false; #endif } template<typename ExecutionPolicy> void TestStablePartitionStencilDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<T> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<iterator> result(1); thrust::device_vector<bool> is_supported(1); stable_partition_kernel<<<1,1>>>(exec, data.begin(), data.end(), stencil.begin(), is_even<T>(), result.begin(), is_supported.begin()); cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); if(is_supported[0]) { thrust::device_vector<T> ref(5); ref[0] = 1; ref[1] = 1; ref[2] = 0; ref[3] = 0; ref[4] = 0; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } } void TestStablePartitionStencilDeviceSeq() { TestStablePartitionStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionStencilDeviceSeq); void TestStablePartitionStencilDeviceDevice() { TestStablePartitionStencilDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionStencilDeviceDevice); void TestStablePartitionStencilDeviceNoSync() { TestStablePartitionStencilDevice(thrust::cuda::par_nosync); } DECLARE_UNITTEST(TestStablePartitionStencilDeviceNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Predicate, typename Iterator4> __global__ void stable_partition_copy_kernel(ExecutionPolicy exec, 
Iterator1 first, Iterator1 last, Iterator2 true_result, Iterator3 false_result, Predicate pred, Iterator4 result) { *result = thrust::stable_partition_copy(exec, first, last, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestStablePartitionCopyDevice(ExecutionPolicy exec) { typedef int T; typedef thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); stable_partition_copy_kernel<<<1,1>>>(exec, data.begin(), data.end(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); thrust::device_vector<T> true_ref(2); true_ref[0] = 2; true_ref[1] = 2; thrust::device_vector<T> false_ref(3); false_ref[0] = 1; false_ref[1] = 1; false_ref[2] = 1; pair_type ends = iterators[0]; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestStablePartitionCopyDeviceSeq() { TestStablePartitionCopyDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionCopyDeviceSeq); void TestStablePartitionCopyDeviceDevice() { TestStablePartitionCopyDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionCopyDeviceDevice); void TestStablePartitionCopyDeviceNoSync() { TestStablePartitionCopyDevice(thrust::cuda::par_nosync); } DECLARE_UNITTEST(TestStablePartitionCopyDeviceNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Predicate, typename Iterator5> __global__ void stable_partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Iterator3 true_result, Iterator4 false_result, Predicate pred, Iterator5 result) { *result = thrust::stable_partition_copy(exec, first, last, stencil_first, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestStablePartitionCopyStencilDevice(ExecutionPolicy exec) { typedef int T; thrust::device_vector<int> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<int> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef typename thrust::device_vector<int>::iterator iterator; typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); stable_partition_copy_kernel<<<1,1>>>(exec, data.begin(), data.end(), stencil.begin(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); pair_type ends = iterators[0]; thrust::device_vector<int> true_ref(2); true_ref[0] = 1; true_ref[1] = 1; thrust::device_vector<int> false_ref(3); false_ref[0] = 0; false_ref[1] = 0; false_ref[2] = 0; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestStablePartitionCopyStencilDeviceSeq() { TestStablePartitionCopyStencilDevice(thrust::seq); } 
DECLARE_UNITTEST(TestStablePartitionCopyStencilDeviceSeq); void TestStablePartitionCopyStencilDeviceDevice() { TestStablePartitionCopyStencilDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionCopyStencilDeviceDevice); void TestStablePartitionCopyStencilDeviceNoSync() { TestStablePartitionCopyStencilDevice(thrust::cuda::par_nosync); } DECLARE_UNITTEST(TestStablePartitionCopyStencilDeviceNoSync); template<typename ExecutionPolicy> void TestPartitionCudaStreams(ExecutionPolicy policy) { typedef thrust::device_vector<int> Vector; typedef Vector::value_type T; typedef Vector::iterator Iterator; Vector data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; cudaStream_t s; cudaStreamCreate(&s); auto streampolicy = policy.on(s); Iterator iter = thrust::partition(streampolicy, data.begin(), data.end(), is_even<T>()); Vector ref(5); ref[0] = 2; ref[1] = 2; ref[2] = 1; ref[3] = 1; ref[4] = 1; ASSERT_EQUAL(iter - data.begin(), 2); ASSERT_EQUAL(data, ref); cudaStreamDestroy(s); } void TestPartitionCudaStreamsSync() { TestPartitionCudaStreams(thrust::cuda::par); } DECLARE_UNITTEST(TestPartitionCudaStreamsSync); void TestPartitionCudaStreamsNoSync() { TestPartitionCudaStreams(thrust::cuda::par_nosync); } DECLARE_UNITTEST(TestPartitionCudaStreamsNoSync);
678b8b81d30d6036275f87df43ecbfb2d99baa5f.hip
// !!! This is a file automatically generated by hipify!!! /***************************************************************************** Name : cudapcg.cu Author : Flores, Facundo Gabriel e-mail : [email protected] Version : 0.1 Description : This is the imeplementation of the pre conditionated conjugate gradient method using CUDA and cuBlas. License : Copyright (C) 2012 Flores, Facundo Gabriel This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. **************************************************************************** */ #include <iostream> #include <cstdlib> #include <hip/hip_runtime.h> #include <rocblas.h> #include <vector> #include <sys/time.h> #include <cudapcg.cuh> using namespace std; /** * Set a null vector. * Remember you have to allocate memory previously * @param dst the null vector * @param N the vector Width */ void cudapcgFillZeros(float *dst, const int N) { for(int i = 0; i < N; i++) dst[i] = 0; } /** * Take the vector from the linear container * @param dst the C++ vector * @param vec the C array */ void cudapcgGetArray(float *dst, const vector<float> &vec) { for(int i = 0; i < vec.size(); i++) dst[i] = vec[i]; } /** * Solve a linear system of equations using the * conjugate gradient method. 
* @param Matrix_A Matrix A * @param Vector_B Vector B * @param Vector_X Vector X * @param e allowed error * @param MaxIter allowed iterations * @return the time spent */ long cuda_PCG(const linearMatrix &Matrix_A, const linearMatrix &Matrix_M, const linearMatrix &Vector_B, linearMatrix &Vector_X, const float e, const int MaxIter) { int k = 0; int N = Vector_B.Get_Width(); float *nullvec = (float *)malloc(sizeof(float) * N); if(!nullvec) { cout << "Cannot allocate memory" << endl; exit(EXIT_FAILURE); } cudapcgFillZeros(nullvec, N); /* ********** HOST VARIABLES ********** */ float *mA = (float *)malloc(sizeof(float) * N * N); if(!mA) { cout << "Cannot allocate memory" << endl; exit(EXIT_FAILURE); } float *mM = (float *)malloc(sizeof(float) * N * N); if(!mM) { cout << "Cannot allocate memory" << endl; exit(EXIT_FAILURE); } float *vB = (float *)malloc(sizeof(float) * N); if(!vB) { cout << "Cannot allocate memory" << endl; exit(EXIT_FAILURE); } float *vX = (float *)malloc(sizeof(float) * N); if(!vX) { cout << "Cannot allocate memory" << endl; exit(EXIT_FAILURE); } //Set vectors cudapcgGetArray(mA, Matrix_A.Get_Vector()); cudapcgGetArray(vB, Vector_B.Get_Vector()); cudapcgGetArray(vX, Vector_X.Get_Vector()); cudapcgGetArray(mM, Matrix_M.Get_Vector()); /* ************************************ */ /* ********** DEVICE VARIABLES ********** */ hipError_t cudaStat ; hipblasStatus_t stat ; hipblasHandle_t handle ; float *dev_A; float *dev_M; float *dev_z; float *dev_B; float *dev_X; float *dev_tmp; float *dev_r; float *dev_p; float *dev_w; float *dev_r_tmp; float *dev_null; /* ======================== Allocating ======================== */ cudaStat = hipMalloc( (void **)& dev_null, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = hipMalloc( (void **)& dev_A, N * N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = hipMalloc( (void **)& dev_M, N * N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = hipMalloc( (void **)& dev_B, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl;; exit(EXIT_FAILURE); } cudaStat = hipMalloc( (void **)& dev_z, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl;; exit(EXIT_FAILURE); } cudaStat = hipMalloc( (void **)& dev_X, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = hipMalloc( (void **)& dev_tmp, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = hipMalloc( (void **)& dev_r, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = hipMalloc( (void **)& dev_p, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = hipMalloc( (void **)& dev_w, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = hipMalloc( (void **)& dev_r_tmp, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } /* 
================================================================ */ /* ==================== Settings ==================== */ stat = hipblasCreate(&handle); if(stat != HIPBLAS_STATUS_SUCCESS) { cout << "CUBLAS Initialization failed!" << endl; exit(EXIT_FAILURE); } stat = hipblasSetMatrix(N, N, sizeof(float), mA, N, dev_A, N); if(stat != HIPBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting matrix failed" << endl; exit(EXIT_FAILURE); } stat = hipblasSetMatrix(N, N, sizeof(float), mM, N, dev_M, N); if(stat != HIPBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting matrix failed" << endl; exit(EXIT_FAILURE); } stat = hipblasSetVector(N, sizeof(float), nullvec, 1, dev_null, 1); if(stat != HIPBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vector NULL failed" << endl; exit(EXIT_FAILURE); } stat = hipblasSetVector(N, sizeof(float), nullvec, 1, dev_z, 1); if(stat != HIPBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vector Z failed" << endl; exit(EXIT_FAILURE); } stat = hipblasSetVector(N, sizeof(float), vB, 1, dev_B, 1); if(stat != HIPBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vectorB failed" << endl; exit(EXIT_FAILURE); } stat = hipblasSetVector(N, sizeof(float), nullvec, 1, dev_X, 1); if(stat != HIPBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vectorX failed" << endl; exit(EXIT_FAILURE); } stat = hipblasSetVector(N, sizeof(float), nullvec, 1, dev_tmp, 1); if(stat != HIPBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vector tmp failed" << endl; exit(EXIT_FAILURE); } stat = hipblasSetVector(N, sizeof(float), nullvec, 1, dev_r, 1); if(stat != HIPBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vector r failed" << endl; exit(EXIT_FAILURE); } stat = hipblasSetVector(N, sizeof(float), nullvec, 1, dev_p, 1); if(stat != HIPBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vector p failed" << endl; exit(EXIT_FAILURE); } stat = hipblasSetVector(N, sizeof(float), nullvec, 1, dev_w, 1); if(stat != HIPBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vector w failed" << endl; exit(EXIT_FAILURE); } stat = hipblasSetVector(N, sizeof(float), nullvec, 1, dev_r_tmp, 1); if(stat != HIPBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vector r tmp failed" << endl; exit(EXIT_FAILURE); } /* ======================================================== */ /* ************************************** */ /* ********** CG Method ********** */ timeval start; timeval end; long seconds, useconds, final; gettimeofday(&start, 0); float delta; float delta_old; float myerror; float beta = 0; float alpha; float norm_b; float new_e; float dotprod; //dev_tmp = - dev_A * dev_X(matrix-vector multiplication float gemv_alpha = -1; float gemv_beta = 1; hipblasSgemv(handle, HIPBLAS_OP_N, N, N, &gemv_alpha, dev_A, N, dev_X, 1, &gemv_beta, dev_tmp, 1); //dev_tmp = dev_B + dev_tmp gemv_alpha = 1; hipblasSaxpy(handle, N, &gemv_alpha, dev_B, 1, dev_tmp, 1); //r = dev_tmp hipMemcpy(dev_r, dev_tmp, N * sizeof(float), hipMemcpyDeviceToDevice); //delta = <r,r> hipblasSdot(handle, N, dev_r, 1, dev_r, 1, &dotprod); delta = dotprod; myerror = sqrt(delta); //||Vector_B|| hipblasSdot(handle, N, dev_B, 1, dev_B, 1, &dotprod); norm_b = sqrt(dotprod); new_e = e * norm_b; while(myerror > new_e && k < MaxIter) { k++; gemv_alpha = 1; gemv_beta = 1; hipMemcpy(dev_z, dev_null, N * sizeof(float), hipMemcpyDeviceToDevice); hipblasSgemv(handle, HIPBLAS_OP_T, N, N, &gemv_alpha, dev_M, N, dev_r, 1, &gemv_beta, dev_z, 1); hipblasSdot(handle, N, dev_z, 1, dev_r, 1, &delta); if(k == 1) { beta = 0; hipMemcpy(dev_p, dev_z, N * sizeof(float), hipMemcpyDeviceToDevice); } else { beta = delta / 
delta_old; //helper hipMemcpy(dev_r_tmp, dev_z, N * sizeof(float), hipMemcpyDeviceToDevice); //p = z + beta * p hipblasSaxpy(handle, N, &beta, dev_p, 1, dev_r_tmp, 1); hipMemcpy(dev_p, dev_r_tmp, N * sizeof(float), hipMemcpyDeviceToDevice); } hipMemcpy(dev_w, dev_null, N * sizeof(float), hipMemcpyDeviceToDevice); //w = A*p gemv_alpha = 1; // tmp gemv_beta = 1; hipblasSgemv(handle, HIPBLAS_OP_T, N, N, &gemv_alpha, dev_A, N, dev_p, 1, &gemv_beta, dev_w, 1); //alpha = delta / <p,w> hipblasSdot(handle, N, dev_p, 1, dev_w, 1, &dotprod); alpha = delta / dotprod; //x = alpha * p + x gemv_alpha = alpha; hipblasSaxpy(handle, N, &gemv_alpha, dev_p, 1, dev_X, 1); //r = r - alpha * w gemv_alpha = alpha * (-1); hipblasSaxpy(handle, N, &gemv_alpha, dev_w, 1, dev_r, 1); delta_old = delta; //delta = <r,r> hipblasSdot(handle, N, dev_r, 1, dev_r, 1, &dotprod); delta = dotprod; myerror = sqrt(delta); } if(k == MaxIter) cout << "Can't Solve that system of equations!" << endl; /* ****************************** */ gettimeofday(&end, 0); seconds = end.tv_sec - start.tv_sec; useconds = end.tv_usec - start.tv_usec; final = ((seconds) * 1000 + useconds/1000.0) + 0.5; //Give the result to host hipMemcpy(vX, dev_X, N * sizeof(float), hipMemcpyDeviceToHost); vector<float> v_tmp(vX, vX + N); Vector_X.Set_Matrix(v_tmp, N, 1); hipFree(dev_M); hipFree(dev_r_tmp); hipFree(dev_w); hipFree(dev_p); hipFree(dev_r); hipFree(dev_z); hipFree(dev_tmp); hipFree(dev_X); hipFree(dev_B); hipFree(dev_A); hipFree(dev_null); hipblasDestroy(handle); free(mA); free(vB); free(vX); free(nullvec); free(mM); return final; }
678b8b81d30d6036275f87df43ecbfb2d99baa5f.cu
/***************************************************************************** Name : cudapcg.cu Author : Flores, Facundo Gabriel e-mail : [email protected] Version : 0.1 Description : This is the imeplementation of the pre conditionated conjugate gradient method using CUDA and cuBlas. License : Copyright (C) 2012 Flores, Facundo Gabriel This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. **************************************************************************** */ #include <iostream> #include <cstdlib> #include <cuda_runtime.h> #include <cublas_v2.h> #include <vector> #include <sys/time.h> #include <cudapcg.cuh> using namespace std; /** * Set a null vector. * Remember you have to allocate memory previously * @param dst the null vector * @param N the vector Width */ void cudapcgFillZeros(float *dst, const int N) { for(int i = 0; i < N; i++) dst[i] = 0; } /** * Take the vector from the linear container * @param dst the C++ vector * @param vec the C array */ void cudapcgGetArray(float *dst, const vector<float> &vec) { for(int i = 0; i < vec.size(); i++) dst[i] = vec[i]; } /** * Solve a linear system of equations using the * conjugate gradient method. * @param Matrix_A Matrix A * @param Vector_B Vector B * @param Vector_X Vector X * @param e allowed error * @param MaxIter allowed iterations * @return the time spent */ long cuda_PCG(const linearMatrix &Matrix_A, const linearMatrix &Matrix_M, const linearMatrix &Vector_B, linearMatrix &Vector_X, const float e, const int MaxIter) { int k = 0; int N = Vector_B.Get_Width(); float *nullvec = (float *)malloc(sizeof(float) * N); if(!nullvec) { cout << "Cannot allocate memory" << endl; exit(EXIT_FAILURE); } cudapcgFillZeros(nullvec, N); /* ********** HOST VARIABLES ********** */ float *mA = (float *)malloc(sizeof(float) * N * N); if(!mA) { cout << "Cannot allocate memory" << endl; exit(EXIT_FAILURE); } float *mM = (float *)malloc(sizeof(float) * N * N); if(!mM) { cout << "Cannot allocate memory" << endl; exit(EXIT_FAILURE); } float *vB = (float *)malloc(sizeof(float) * N); if(!vB) { cout << "Cannot allocate memory" << endl; exit(EXIT_FAILURE); } float *vX = (float *)malloc(sizeof(float) * N); if(!vX) { cout << "Cannot allocate memory" << endl; exit(EXIT_FAILURE); } //Set vectors cudapcgGetArray(mA, Matrix_A.Get_Vector()); cudapcgGetArray(vB, Vector_B.Get_Vector()); cudapcgGetArray(vX, Vector_X.Get_Vector()); cudapcgGetArray(mM, Matrix_M.Get_Vector()); /* ************************************ */ /* ********** DEVICE VARIABLES ********** */ cudaError_t cudaStat ; cublasStatus_t stat ; cublasHandle_t handle ; float *dev_A; float *dev_M; float *dev_z; float *dev_B; float *dev_X; float *dev_tmp; float *dev_r; float *dev_p; float *dev_w; float *dev_r_tmp; float *dev_null; /* ======================== Allocating ======================== */ cudaStat = cudaMalloc( (void **)& dev_null, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } 
cudaStat = cudaMalloc( (void **)& dev_A, N * N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = cudaMalloc( (void **)& dev_M, N * N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = cudaMalloc( (void **)& dev_B, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl;; exit(EXIT_FAILURE); } cudaStat = cudaMalloc( (void **)& dev_z, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl;; exit(EXIT_FAILURE); } cudaStat = cudaMalloc( (void **)& dev_X, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = cudaMalloc( (void **)& dev_tmp, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = cudaMalloc( (void **)& dev_r, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = cudaMalloc( (void **)& dev_p, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = cudaMalloc( (void **)& dev_w, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } cudaStat = cudaMalloc( (void **)& dev_r_tmp, N * sizeof(float) ); if(cudaStat != EXIT_SUCCESS) { cout << "device memory allocation failed" << endl; exit(EXIT_FAILURE); } /* ================================================================ */ /* ==================== Settings ==================== */ stat = cublasCreate(&handle); if(stat != CUBLAS_STATUS_SUCCESS) { cout << "CUBLAS Initialization failed!" 
<< endl; exit(EXIT_FAILURE); } stat = cublasSetMatrix(N, N, sizeof(float), mA, N, dev_A, N); if(stat != CUBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting matrix failed" << endl; exit(EXIT_FAILURE); } stat = cublasSetMatrix(N, N, sizeof(float), mM, N, dev_M, N); if(stat != CUBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting matrix failed" << endl; exit(EXIT_FAILURE); } stat = cublasSetVector(N, sizeof(float), nullvec, 1, dev_null, 1); if(stat != CUBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vector NULL failed" << endl; exit(EXIT_FAILURE); } stat = cublasSetVector(N, sizeof(float), nullvec, 1, dev_z, 1); if(stat != CUBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vector Z failed" << endl; exit(EXIT_FAILURE); } stat = cublasSetVector(N, sizeof(float), vB, 1, dev_B, 1); if(stat != CUBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vectorB failed" << endl; exit(EXIT_FAILURE); } stat = cublasSetVector(N, sizeof(float), nullvec, 1, dev_X, 1); if(stat != CUBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vectorX failed" << endl; exit(EXIT_FAILURE); } stat = cublasSetVector(N, sizeof(float), nullvec, 1, dev_tmp, 1); if(stat != CUBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vector tmp failed" << endl; exit(EXIT_FAILURE); } stat = cublasSetVector(N, sizeof(float), nullvec, 1, dev_r, 1); if(stat != CUBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vector r failed" << endl; exit(EXIT_FAILURE); } stat = cublasSetVector(N, sizeof(float), nullvec, 1, dev_p, 1); if(stat != CUBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vector p failed" << endl; exit(EXIT_FAILURE); } stat = cublasSetVector(N, sizeof(float), nullvec, 1, dev_w, 1); if(stat != CUBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vector w failed" << endl; exit(EXIT_FAILURE); } stat = cublasSetVector(N, sizeof(float), nullvec, 1, dev_r_tmp, 1); if(stat != CUBLAS_STATUS_SUCCESS) { cout << "CUBLAS setting vector r tmp failed" << endl; exit(EXIT_FAILURE); } /* ======================================================== */ /* ************************************** */ /* ********** CG Method ********** */ timeval start; timeval end; long seconds, useconds, final; gettimeofday(&start, 0); float delta; float delta_old; float myerror; float beta = 0; float alpha; float norm_b; float new_e; float dotprod; //dev_tmp = - dev_A * dev_X(matrix-vector multiplication float gemv_alpha = -1; float gemv_beta = 1; cublasSgemv(handle, CUBLAS_OP_N, N, N, &gemv_alpha, dev_A, N, dev_X, 1, &gemv_beta, dev_tmp, 1); //dev_tmp = dev_B + dev_tmp gemv_alpha = 1; cublasSaxpy(handle, N, &gemv_alpha, dev_B, 1, dev_tmp, 1); //r = dev_tmp cudaMemcpy(dev_r, dev_tmp, N * sizeof(float), cudaMemcpyDeviceToDevice); //delta = <r,r> cublasSdot(handle, N, dev_r, 1, dev_r, 1, &dotprod); delta = dotprod; myerror = sqrt(delta); //||Vector_B|| cublasSdot(handle, N, dev_B, 1, dev_B, 1, &dotprod); norm_b = sqrt(dotprod); new_e = e * norm_b; while(myerror > new_e && k < MaxIter) { k++; gemv_alpha = 1; gemv_beta = 1; cudaMemcpy(dev_z, dev_null, N * sizeof(float), cudaMemcpyDeviceToDevice); cublasSgemv(handle, CUBLAS_OP_T, N, N, &gemv_alpha, dev_M, N, dev_r, 1, &gemv_beta, dev_z, 1); cublasSdot(handle, N, dev_z, 1, dev_r, 1, &delta); if(k == 1) { beta = 0; cudaMemcpy(dev_p, dev_z, N * sizeof(float), cudaMemcpyDeviceToDevice); } else { beta = delta / delta_old; //helper cudaMemcpy(dev_r_tmp, dev_z, N * sizeof(float), cudaMemcpyDeviceToDevice); //p = z + beta * p cublasSaxpy(handle, N, &beta, dev_p, 1, dev_r_tmp, 1); cudaMemcpy(dev_p, dev_r_tmp, N * sizeof(float), cudaMemcpyDeviceToDevice); } 
cudaMemcpy(dev_w, dev_null, N * sizeof(float), cudaMemcpyDeviceToDevice); //w = A*p gemv_alpha = 1; // tmp gemv_beta = 1; cublasSgemv(handle, CUBLAS_OP_T, N, N, &gemv_alpha, dev_A, N, dev_p, 1, &gemv_beta, dev_w, 1); //alpha = delta / <p,w> cublasSdot(handle, N, dev_p, 1, dev_w, 1, &dotprod); alpha = delta / dotprod; //x = alpha * p + x gemv_alpha = alpha; cublasSaxpy(handle, N, &gemv_alpha, dev_p, 1, dev_X, 1); //r = r - alpha * w gemv_alpha = alpha * (-1); cublasSaxpy(handle, N, &gemv_alpha, dev_w, 1, dev_r, 1); delta_old = delta; //delta = <r,r> cublasSdot(handle, N, dev_r, 1, dev_r, 1, &dotprod); delta = dotprod; myerror = sqrt(delta); } if(k == MaxIter) cout << "Can't Solve that system of equations!" << endl; /* ****************************** */ gettimeofday(&end, 0); seconds = end.tv_sec - start.tv_sec; useconds = end.tv_usec - start.tv_usec; final = ((seconds) * 1000 + useconds/1000.0) + 0.5; //Give the result to host cudaMemcpy(vX, dev_X, N * sizeof(float), cudaMemcpyDeviceToHost); vector<float> v_tmp(vX, vX + N); Vector_X.Set_Matrix(v_tmp, N, 1); cudaFree(dev_M); cudaFree(dev_r_tmp); cudaFree(dev_w); cudaFree(dev_p); cudaFree(dev_r); cudaFree(dev_z); cudaFree(dev_tmp); cudaFree(dev_X); cudaFree(dev_B); cudaFree(dev_A); cudaFree(dev_null); cublasDestroy(handle); free(mA); free(vB); free(vX); free(nullvec); free(mM); return final; }
33b96edb4fe22286c689420f7ffe61b29b0596a1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "../common/base.h"

__global__ void add(int a, int b, int* c) {
    *c = a + b;
}

int main() {
    int c;
    int *dev_c;
    HANDLE_ERROR( hipMalloc( (void**)&dev_c, sizeof(int)));
    hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, 1, 2, dev_c);
    HANDLE_ERROR( hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost));
    std::cout << "1+2=" << c << std::endl;
    hipFree(dev_c);
    return 0;
}
33b96edb4fe22286c689420f7ffe61b29b0596a1.cu
#include <iostream>
#include "../common/base.h"

__global__ void add(int a, int b, int* c) {
    *c = a + b;
}

int main() {
    int c;
    int *dev_c;
    HANDLE_ERROR( cudaMalloc( (void**)&dev_c, sizeof(int)));
    add<<<1,1>>>(1, 2, dev_c);
    HANDLE_ERROR( cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));
    std::cout << "1+2=" << c << std::endl;
    cudaFree(dev_c);
    return 0;
}
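/*
 * The HANDLE_ERROR macro used above comes from "../common/base.h", which is not
 * shown in this file. A sketch of a typical definition is given below; the
 * HandleError/HANDLE_ERROR names are assumed to match, but the real header may
 * differ.
 */
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

static void HandleError(cudaError_t err, const char *file, int line) {
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))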
fd6e3bf67c590eb771285e7395f2169d6ca5097b.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>

#define N (2048*2048)
#define THREADS_PER_BLOCK 512

__global__ void add(int *a, int *b, int *c) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;  // was "blockInx.x", which does not compile
    c[index] = a[index] + b[index];
}

int main() {
    int *a, *b, *c;        // host copies of variables a, b & c
    int *d_a, *d_b, *d_c;  // device copies of variables a, b & c
    int size = N * sizeof(int);

    // Allocate space for device copies of a, b, c
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, size);

    // Setup input values
    a = (int *) malloc(size);
    b = (int *) malloc(size);
    c = (int *) malloc(size);
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = -i;
    }

    // Copy inputs to device (a and b are already pointers; passing &a/&b copied garbage)
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);

    // Launch add() kernel on GPU
    hipLaunchKernelGGL(( add), dim3(N/THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c);

    // Copy result back to host
    hipError_t err = hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
    if (err != hipSuccess) {
        printf("CUDA error copying to Host: %s\n", hipGetErrorString(err));
    }
    for (int i = 0; i < 10; i++) {
        printf("%d ", c[i]);
    }

    // Cleanup
    free(a); free(b); free(c);
    hipFree(d_a); hipFree(d_b); hipFree(d_c);
    return 0;
}
fd6e3bf67c590eb771285e7395f2169d6ca5097b.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

#define N (2048*2048)
#define THREADS_PER_BLOCK 512

__global__ void add(int *a, int *b, int *c) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;  // was "blockInx.x", which does not compile
    c[index] = a[index] + b[index];
}

int main() {
    int *a, *b, *c;        // host copies of variables a, b & c
    int *d_a, *d_b, *d_c;  // device copies of variables a, b & c
    int size = N * sizeof(int);

    // Allocate space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Setup input values
    a = (int *) malloc(size);
    b = (int *) malloc(size);
    c = (int *) malloc(size);
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = -i;
    }

    // Copy inputs to device (a and b are already pointers; passing &a/&b copied garbage)
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Launch add() kernel on GPU
    add<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);

    // Copy result back to host
    cudaError_t err = cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        printf("CUDA error copying to Host: %s\n", cudaGetErrorString(err));
    }
    for (int i = 0; i < 10; i++) {
        printf("%d ", c[i]);
    }

    // Cleanup
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
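/*
 * In the file above N (2048*2048) happens to be an exact multiple of
 * THREADS_PER_BLOCK, so the launch covers every element exactly. A sketch of
 * the more general pattern, rounding the grid size up and bounds-checking in
 * the kernel; add_guarded is an illustrative name, not part of the file above.
 */
__global__ void add_guarded(const int *a, const int *b, int *c, int n) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n) {  // guard against the partially filled last block
        c[index] = a[index] + b[index];
    }
}
// launch: add_guarded<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK,
//                       THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);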
7ca8a3db00a8d00dba35f765289186bd07a6da88.hip
// !!! This is a file automatically generated by hipify!!! /*This version of CUDA code does use hipHostMalloc for async IO acceleration */ #include "../../utils/img_io.cpp" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <bits/stdc++.h> #include <sys/time.h> #define ANSI_COLOR_RED "\x1b[31m" #define ANSI_COLOR_GREEN "\x1b[32m" #define ANSI_COLOR_YELLOW "\x1b[33m" #define ANSI_COLOR_BLUE "\x1b[34m" #define ANSI_COLOR_MAGENTA "\x1b[35m" #define ANSI_COLOR_CYAN "\x1b[36m" #define ANSI_COLOR_RESET "\x1b[0m" using namespace std; // CUDA Stream #define N_STREAMS 1200 // Gaussian filter int filter_size; unsigned int filter_scale, filter_row; unsigned int *filter; // Image IO unsigned char *img_input; unsigned char *img_output; // Global data int thread_cnt, block_row; int cudaError_cnt; string img_name; // CUDA error checker void cuda_err_chk(const hipError_t& e, const int& cudaError_cnt){ if(e != hipSuccess){ fprintf(stderr, "hipError_t in no. %d: %s\n", cudaError_cnt, hipGetErrorString(e)); exit(EXIT_FAILURE); } } // Kernel, 1D dim and grid configuration version __global__ void cuda_gaussian_filter_thread_2D(unsigned char* img_input_cuda, unsigned char* img_output_cuda, int img_row, int img_col, int shift, unsigned int* filter_cuda, int filter_row, unsigned int filter_scale, int img_border){ int cuda_col = blockIdx.x * blockDim.x + threadIdx.x; int cuda_row = blockIdx.y * blockDim.y + threadIdx.y; unsigned int tmp = 0; int target = 0; int a, b; if (3 * (cuda_row * img_col + cuda_col) + shift >= img_border){ return; } for(int j = 0; j < filter_row; j++){ for(int i = 0; i < filter_row; i++){ a = cuda_col + i - (filter_row / 2); b = cuda_row + j - (filter_row / 2); target = 3 * (b * img_col + a) + shift; if (target >= img_border || target < 0){ continue; } tmp += filter_cuda[j * filter_row + i] * img_input_cuda[target]; } } tmp /= filter_scale; if(tmp > 255){ tmp = 255; } img_output_cuda[3 * (cuda_row * img_col + cuda_col) + shift] = tmp; } int cuda_run(const int& img_row, const int& img_col, const int& resolution, const int& async){ /*-------------- CUDA init ------------*/ // Allocate memory hipHostMalloc((void**)&img_output, resolution * sizeof(unsigned char)); unsigned char* img_input_cuda; unsigned char* img_output_cuda; unsigned int* filter_cuda; cuda_err_chk(hipMalloc((void**) &img_input_cuda, resolution * sizeof(unsigned char)), cudaError_cnt++); cuda_err_chk(hipMalloc((void**) &img_output_cuda, resolution * sizeof(unsigned char)), cudaError_cnt++); cuda_err_chk(hipMalloc((void**) &filter_cuda, filter_size * sizeof(unsigned int)), cudaError_cnt++); // Copy memory from host to GPU cuda_err_chk(hipMemcpy(filter_cuda, filter, filter_size * sizeof(unsigned int), hipMemcpyHostToDevice), cudaError_cnt++); // Thread configurations const dim3 block_size(block_row, block_row); const dim3 grid_size_sync((img_col + block_row - 1) / block_row, (img_row + block_row - 1) / (block_row)); const dim3 grid_size_async((img_col + block_row - 1) / block_row, (img_row / N_STREAMS + block_row - 1) / (block_row)); // Init CUDA streams int offset = 0; int chunk_size = resolution / N_STREAMS; hipStream_t streams[N_STREAMS]; for (int i = 0; i < N_STREAMS; i++) { hipStreamCreate(&streams[i]); } // Measuring time struct timeval start, end; gettimeofday(&start, 0); if(async){ /*-------------- CUDA run async ------------*/ for(int j = 0; j < N_STREAMS; j++){ offset = chunk_size * j; cuda_err_chk(hipMemcpyAsync(img_input_cuda + offset, img_input + offset, chunk_size * sizeof(unsigned char), 
hipMemcpyHostToDevice, streams[j]), cudaError_cnt++); for(int i = 0; i < 3; i++) { hipLaunchKernelGGL(( cuda_gaussian_filter_thread_2D), dim3(grid_size_async), dim3(block_size), 0, streams[j], img_input_cuda + offset, img_output_cuda + offset, img_row, img_col, i, filter_cuda, filter_row, filter_scale, chunk_size); } cuda_err_chk(hipMemcpyAsync(img_output + offset, img_output_cuda + offset, chunk_size * sizeof(unsigned char), hipMemcpyDeviceToHost, streams[j]), cudaError_cnt++); } // for(int j = 0; j < N_STREAMS; j++){ // offset = chunk_size * j; // cuda_err_chk(hipMemcpyAsync(img_input_cuda + offset, img_input + offset, chunk_size * sizeof(unsigned char), hipMemcpyHostToDevice, streams[j]), cudaError_cnt++); // } // for(int j = 0; j < N_STREAMS; j++){ // offset = chunk_size * j; // for(int i = 0; i < 3; i++) { // hipLaunchKernelGGL(( cuda_gaussian_filter_thread_2D), dim3(grid_size_async), dim3(block_size), 0, streams[j], img_input_cuda + offset, img_output_cuda + offset, img_row, img_col, i, filter_cuda, filter_row, filter_scale, chunk_size); // } // } // for(int j = 0; j < N_STREAMS; j++){ // offset = chunk_size * j; // cuda_err_chk(hipMemcpyAsync(img_output + offset, img_output_cuda + offset, chunk_size * sizeof(unsigned char), hipMemcpyDeviceToHost, streams[j]), cudaError_cnt++); // } } else{ /*-------------- CUDA run sync ------------*/ cuda_err_chk(hipMemcpy(img_input_cuda, img_input, resolution * sizeof(unsigned char), hipMemcpyHostToDevice), cudaError_cnt++); for(int i = 0; i < 3; i++) { hipLaunchKernelGGL(( cuda_gaussian_filter_thread_2D), dim3(grid_size_sync), dim3(block_size), 0, 0, img_input_cuda, img_output_cuda, img_row, img_col, i, filter_cuda, filter_row, filter_scale, resolution); } cuda_err_chk(hipMemcpy(img_output, img_output_cuda, resolution * sizeof(unsigned char), hipMemcpyDeviceToHost), cudaError_cnt++); } cuda_err_chk(hipDeviceSynchronize(), cudaError_cnt++); gettimeofday(&end, 0); int sec = end.tv_sec - start.tv_sec; int usec = end.tv_usec - start.tv_usec; int t_gpu = sec * 1000 + (usec / 1000); printf(ANSI_COLOR_RED "GPU time (ms): %d " ANSI_COLOR_RESET "\n", t_gpu); // Copy image from malloc unsigned char* img_CV = new unsigned char [resolution]; for(int i = 0; i < img_row; i++){ for(int j = 0; j < img_col; j++){ img_CV[3 * (i * img_col + j) + 0] = img_output[3 * (i * img_col + j) + 0]; img_CV[3 * (i * img_col + j) + 1] = img_output[3 * (i * img_col + j) + 1]; img_CV[3 * (i * img_col + j) + 2] = img_output[3 * (i * img_col + j) + 2]; } } img_write(img_name, img_output, img_row, img_col, 2, async); free(img_CV); cuda_err_chk(hipFree(img_input_cuda), cudaError_cnt++); cuda_err_chk(hipFree(img_output_cuda), cudaError_cnt++); cuda_err_chk(hipFree(filter_cuda), cudaError_cnt++); printf("[LOG]: Finished %s\n\n", async ? "async" : "sync"); return t_gpu; } void free_memory(){ hipHostFree(img_input); hipHostFree(img_output); hipHostFree(filter); } int main(int argc, char* argv[]){ /*--------------- Init -------------------*/ cudaError_cnt = 0; if(argc < 2){ fprintf(stderr, "%s", "Please provide filename for Gaussian Blur. 
usage ./gb_thread_1D.o <Image file> \n"); return -1; } else if(argc == 3){ sscanf(argv[2], "%d", &thread_cnt); } else{ // Set default thread count to 1024 thread_cnt = 1024; } block_row = (int)sqrt(thread_cnt); int num = 0; hipGetDeviceCount(&num); hipDeviceProp_t prop; if(num > 0){ hipGetDeviceProperties(&prop, 0); printf("[LOG]: Device: %s \n", prop.name); } else{ fprintf(stderr, "%s", "No NVIDIA GPU detected!\n"); return 1; } /*---------------- Image and mask IO ----*/ int img_row, img_col, resolution; unsigned char* img_CV; img_name = argv[1]; img_CV = img_read(img_name, img_row, img_col); resolution = 3 * img_row * img_col; hipHostMalloc((void**)&img_input, resolution * sizeof(unsigned char)); for(int i = 0; i < img_row; i++){ for(int j = 0; j < img_col; j++){ img_input[3 * (i * img_col + j) + 0] = img_CV[3 * (i * img_col + j) + 0]; img_input[3 * (i * img_col + j) + 1] = img_CV[3 * (i * img_col + j) + 1]; img_input[3 * (i * img_col + j) + 2] = img_CV[3 * (i * img_col + j) + 2]; } } free(img_CV); FILE* mask; mask = fopen("mask_Gaussian.txt", "r"); fscanf(mask, "%d", &filter_size); filter_row = (int)sqrt(filter_size); // filter = new int [filter_size]; hipHostMalloc((void**)&filter, filter_size * sizeof(int)); for(int i = 0; i < filter_size; i++){ fscanf(mask, "%u", &filter[i]); } filter_scale = 0; for(int i = 0; i < filter_size; i++){ filter_scale += filter[i]; } fclose(mask); /*-------------- CUDA run ------------*/ int t1, t2; t2 = cuda_run(img_row, img_col, resolution, 1); t1 = cuda_run(img_row, img_col, resolution, 0); printf(ANSI_COLOR_YELLOW "[RESULT]: [img_row, img_col, threads in each block]" ANSI_COLOR_RESET "\n"); printf(ANSI_COLOR_YELLOW "[RESULT]: [stream workers, speedup ratio]" ANSI_COLOR_RESET "\n"); printf("%d, %d, %d\n", img_row, img_col, thread_cnt); printf("%d, %.2f\n", N_STREAMS, ((float) t1 / (float)t2)); /*-------------- Cleanup ------------*/ free_memory(); return 0; }
7ca8a3db00a8d00dba35f765289186bd07a6da88.cu
/*This version of CUDA code does use cudaMallocHost for async IO acceleration */ #include "../../utils/img_io.cpp" #include <cuda.h> #include <cuda_runtime.h> #include <bits/stdc++.h> #include <sys/time.h> #define ANSI_COLOR_RED "\x1b[31m" #define ANSI_COLOR_GREEN "\x1b[32m" #define ANSI_COLOR_YELLOW "\x1b[33m" #define ANSI_COLOR_BLUE "\x1b[34m" #define ANSI_COLOR_MAGENTA "\x1b[35m" #define ANSI_COLOR_CYAN "\x1b[36m" #define ANSI_COLOR_RESET "\x1b[0m" using namespace std; // CUDA Stream #define N_STREAMS 1200 // Gaussian filter int filter_size; unsigned int filter_scale, filter_row; unsigned int *filter; // Image IO unsigned char *img_input; unsigned char *img_output; // Global data int thread_cnt, block_row; int cudaError_cnt; string img_name; // CUDA error checker void cuda_err_chk(const cudaError_t& e, const int& cudaError_cnt){ if(e != cudaSuccess){ fprintf(stderr, "cudaError in no. %d: %s\n", cudaError_cnt, cudaGetErrorString(e)); exit(EXIT_FAILURE); } } // Kernel, 1D dim and grid configuration version __global__ void cuda_gaussian_filter_thread_2D(unsigned char* img_input_cuda, unsigned char* img_output_cuda, int img_row, int img_col, int shift, unsigned int* filter_cuda, int filter_row, unsigned int filter_scale, int img_border){ int cuda_col = blockIdx.x * blockDim.x + threadIdx.x; int cuda_row = blockIdx.y * blockDim.y + threadIdx.y; unsigned int tmp = 0; int target = 0; int a, b; if (3 * (cuda_row * img_col + cuda_col) + shift >= img_border){ return; } for(int j = 0; j < filter_row; j++){ for(int i = 0; i < filter_row; i++){ a = cuda_col + i - (filter_row / 2); b = cuda_row + j - (filter_row / 2); target = 3 * (b * img_col + a) + shift; if (target >= img_border || target < 0){ continue; } tmp += filter_cuda[j * filter_row + i] * img_input_cuda[target]; } } tmp /= filter_scale; if(tmp > 255){ tmp = 255; } img_output_cuda[3 * (cuda_row * img_col + cuda_col) + shift] = tmp; } int cuda_run(const int& img_row, const int& img_col, const int& resolution, const int& async){ /*-------------- CUDA init ------------*/ // Allocate memory cudaMallocHost((void**)&img_output, resolution * sizeof(unsigned char)); unsigned char* img_input_cuda; unsigned char* img_output_cuda; unsigned int* filter_cuda; cuda_err_chk(cudaMalloc((void**) &img_input_cuda, resolution * sizeof(unsigned char)), cudaError_cnt++); cuda_err_chk(cudaMalloc((void**) &img_output_cuda, resolution * sizeof(unsigned char)), cudaError_cnt++); cuda_err_chk(cudaMalloc((void**) &filter_cuda, filter_size * sizeof(unsigned int)), cudaError_cnt++); // Copy memory from host to GPU cuda_err_chk(cudaMemcpy(filter_cuda, filter, filter_size * sizeof(unsigned int), cudaMemcpyHostToDevice), cudaError_cnt++); // Thread configurations const dim3 block_size(block_row, block_row); const dim3 grid_size_sync((img_col + block_row - 1) / block_row, (img_row + block_row - 1) / (block_row)); const dim3 grid_size_async((img_col + block_row - 1) / block_row, (img_row / N_STREAMS + block_row - 1) / (block_row)); // Init CUDA streams int offset = 0; int chunk_size = resolution / N_STREAMS; cudaStream_t streams[N_STREAMS]; for (int i = 0; i < N_STREAMS; i++) { cudaStreamCreate(&streams[i]); } // Measuring time struct timeval start, end; gettimeofday(&start, 0); if(async){ /*-------------- CUDA run async ------------*/ for(int j = 0; j < N_STREAMS; j++){ offset = chunk_size * j; cuda_err_chk(cudaMemcpyAsync(img_input_cuda + offset, img_input + offset, chunk_size * sizeof(unsigned char), cudaMemcpyHostToDevice, streams[j]), cudaError_cnt++); for(int i = 0; i 
< 3; i++) { cuda_gaussian_filter_thread_2D<<<grid_size_async, block_size, 0, streams[j]>>>(img_input_cuda + offset, img_output_cuda + offset, img_row, img_col, i, filter_cuda, filter_row, filter_scale, chunk_size); } cuda_err_chk(cudaMemcpyAsync(img_output + offset, img_output_cuda + offset, chunk_size * sizeof(unsigned char), cudaMemcpyDeviceToHost, streams[j]), cudaError_cnt++); } // for(int j = 0; j < N_STREAMS; j++){ // offset = chunk_size * j; // cuda_err_chk(cudaMemcpyAsync(img_input_cuda + offset, img_input + offset, chunk_size * sizeof(unsigned char), cudaMemcpyHostToDevice, streams[j]), cudaError_cnt++); // } // for(int j = 0; j < N_STREAMS; j++){ // offset = chunk_size * j; // for(int i = 0; i < 3; i++) { // cuda_gaussian_filter_thread_2D<<<grid_size_async, block_size, 0, streams[j]>>>(img_input_cuda + offset, img_output_cuda + offset, img_row, img_col, i, filter_cuda, filter_row, filter_scale, chunk_size); // } // } // for(int j = 0; j < N_STREAMS; j++){ // offset = chunk_size * j; // cuda_err_chk(cudaMemcpyAsync(img_output + offset, img_output_cuda + offset, chunk_size * sizeof(unsigned char), cudaMemcpyDeviceToHost, streams[j]), cudaError_cnt++); // } } else{ /*-------------- CUDA run sync ------------*/ cuda_err_chk(cudaMemcpy(img_input_cuda, img_input, resolution * sizeof(unsigned char), cudaMemcpyHostToDevice), cudaError_cnt++); for(int i = 0; i < 3; i++) { cuda_gaussian_filter_thread_2D<<<grid_size_sync, block_size>>>(img_input_cuda, img_output_cuda, img_row, img_col, i, filter_cuda, filter_row, filter_scale, resolution); } cuda_err_chk(cudaMemcpy(img_output, img_output_cuda, resolution * sizeof(unsigned char), cudaMemcpyDeviceToHost), cudaError_cnt++); } cuda_err_chk(cudaDeviceSynchronize(), cudaError_cnt++); gettimeofday(&end, 0); int sec = end.tv_sec - start.tv_sec; int usec = end.tv_usec - start.tv_usec; int t_gpu = sec * 1000 + (usec / 1000); printf(ANSI_COLOR_RED "GPU time (ms): %d " ANSI_COLOR_RESET "\n", t_gpu); // Copy image from malloc unsigned char* img_CV = new unsigned char [resolution]; for(int i = 0; i < img_row; i++){ for(int j = 0; j < img_col; j++){ img_CV[3 * (i * img_col + j) + 0] = img_output[3 * (i * img_col + j) + 0]; img_CV[3 * (i * img_col + j) + 1] = img_output[3 * (i * img_col + j) + 1]; img_CV[3 * (i * img_col + j) + 2] = img_output[3 * (i * img_col + j) + 2]; } } img_write(img_name, img_output, img_row, img_col, 2, async); free(img_CV); cuda_err_chk(cudaFree(img_input_cuda), cudaError_cnt++); cuda_err_chk(cudaFree(img_output_cuda), cudaError_cnt++); cuda_err_chk(cudaFree(filter_cuda), cudaError_cnt++); printf("[LOG]: Finished %s\n\n", async ? "async" : "sync"); return t_gpu; } void free_memory(){ cudaFreeHost(img_input); cudaFreeHost(img_output); cudaFreeHost(filter); } int main(int argc, char* argv[]){ /*--------------- Init -------------------*/ cudaError_cnt = 0; if(argc < 2){ fprintf(stderr, "%s", "Please provide filename for Gaussian Blur. 
usage ./gb_thread_1D.o <Image file> \n"); return -1; } else if(argc == 3){ sscanf(argv[2], "%d", &thread_cnt); } else{ // Set default thread count to 1024 thread_cnt = 1024; } block_row = (int)sqrt(thread_cnt); int num = 0; cudaGetDeviceCount(&num); cudaDeviceProp prop; if(num > 0){ cudaGetDeviceProperties(&prop, 0); printf("[LOG]: Device: %s \n", prop.name); } else{ fprintf(stderr, "%s", "No NVIDIA GPU detected!\n"); return 1; } /*---------------- Image and mask IO ----*/ int img_row, img_col, resolution; unsigned char* img_CV; img_name = argv[1]; img_CV = img_read(img_name, img_row, img_col); resolution = 3 * img_row * img_col; cudaMallocHost((void**)&img_input, resolution * sizeof(unsigned char)); for(int i = 0; i < img_row; i++){ for(int j = 0; j < img_col; j++){ img_input[3 * (i * img_col + j) + 0] = img_CV[3 * (i * img_col + j) + 0]; img_input[3 * (i * img_col + j) + 1] = img_CV[3 * (i * img_col + j) + 1]; img_input[3 * (i * img_col + j) + 2] = img_CV[3 * (i * img_col + j) + 2]; } } free(img_CV); FILE* mask; mask = fopen("mask_Gaussian.txt", "r"); fscanf(mask, "%d", &filter_size); filter_row = (int)sqrt(filter_size); // filter = new int [filter_size]; cudaMallocHost((void**)&filter, filter_size * sizeof(int)); for(int i = 0; i < filter_size; i++){ fscanf(mask, "%u", &filter[i]); } filter_scale = 0; for(int i = 0; i < filter_size; i++){ filter_scale += filter[i]; } fclose(mask); /*-------------- CUDA run ------------*/ int t1, t2; t2 = cuda_run(img_row, img_col, resolution, 1); t1 = cuda_run(img_row, img_col, resolution, 0); printf(ANSI_COLOR_YELLOW "[RESULT]: [img_row, img_col, threads in each block]" ANSI_COLOR_RESET "\n"); printf(ANSI_COLOR_YELLOW "[RESULT]: [stream workers, speedup ratio]" ANSI_COLOR_RESET "\n"); printf("%d, %d, %d\n", img_row, img_col, thread_cnt); printf("%d, %.2f\n", N_STREAMS, ((float) t1 / (float)t2)); /*-------------- Cleanup ------------*/ free_memory(); return 0; }
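/*
 * cuda_run() above creates N_STREAMS streams on every call but never destroys
 * them (and the pinned img_output buffer is re-allocated on the second call).
 * A minimal cleanup sketch, assuming the streams array is passed in; the
 * destroy_streams name is illustrative, not part of the file above.
 */
#include <cuda_runtime.h>

static void destroy_streams(cudaStream_t *streams, int n) {
    for (int i = 0; i < n; i++) {
        cudaStreamDestroy(streams[i]);  // release each stream created with cudaStreamCreate
    }
}
// e.g. destroy_streams(streams, N_STREAMS); just before "return t_gpu;" in cuda_run().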
cc757ba2d27cd5e355ddde4346dfee320295d4cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/weighted_softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void WeightedSoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, const Dtype* weight, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { const Dtype weight_value = static_cast<Dtype>(weight[n * spatial_dim + s]); loss[index] = - weight_value * log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void WeightedSoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const Dtype* weight = bottom[2]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( WeightedSoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, weight, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void WeightedSoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, const Dtype* weight, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; const Dtype weight_value = static_cast<Dtype>(weight[n * spatial_dim + s]); for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] *= weight_value; } counts[index] = 1; } } } template <typename Dtype> void WeightedSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const Dtype* weight = bottom[2]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( WeightedSoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, weight, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(WeightedSoftmaxWithLossLayer); } // namespace caffe
cc757ba2d27cd5e355ddde4346dfee320295d4cc.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/weighted_softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void WeightedSoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, const Dtype* weight, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { const Dtype weight_value = static_cast<Dtype>(weight[n * spatial_dim + s]); loss[index] = - weight_value * log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void WeightedSoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const Dtype* weight = bottom[2]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) WeightedSoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, weight, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void WeightedSoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, const Dtype* weight, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; const Dtype weight_value = static_cast<Dtype>(weight[n * spatial_dim + s]); for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] *= weight_value; } counts[index] = 1; } } } template <typename Dtype> void WeightedSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const Dtype* weight = bottom[2]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) WeightedSoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, weight, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(WeightedSoftmaxWithLossLayer); } // namespace caffe
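/*
 * CUDA_KERNEL_LOOP, CAFFE_GET_BLOCKS and CAFFE_CUDA_NUM_THREADS used above are
 * not defined in this file; they come from Caffe's CUDA utility header
 * (device_alternate.hpp in mainline Caffe). For reference, the loop macro
 * expands to a grid-stride loop roughly like this sketch:
 */
#define CUDA_KERNEL_LOOP(i, n)                          \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;   \
       i < (n);                                         \
       i += blockDim.x * gridDim.x)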
634558c59980489fcc6d086b5b82d4d1ad9042b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2018 University of Maryland, College Park * Licensed under The Apache-2.0 License [see LICENSE for details] * \file multi_proposal_target.cc * \brief Proposal target layer * \author Bharat Singh */ #include "./multi_proposal_target_mask_satellite-inl.h" #include <set> #include <math.h> #include <unistd.h> #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include "./operator_common.h" #include "./mshadow_op.h" #include <time.h> #include <stdlib.h> //============================ // Bounding Box Transform Utils //============================ #define NUM_THREADS_NMS 1024 namespace mxnet { namespace op { namespace utils { // filter box by set confidence to zero // * height or width < rpn_min_size inline void FilterBox(float *dets, int num_dets, float min_size) { #pragma omp parallel for num_threads(8) for (int i = 0; i < num_dets; ++i) { float iw = dets[5*i + 2] - dets[5*i] + 1.0f; float ih = dets[5*i + 3] - dets[5*i + 1] + 1.0f; if (iw < min_size || ih < min_size) { dets[5*i+0] -= min_size / 2; dets[5*i+1] -= min_size / 2; dets[5*i+2] += min_size / 2; dets[5*i+3] += min_size / 2; dets[5*i+4] = -1.0f; } } } inline void _MakeAnchor(float w, float h, float x_ctr, float y_ctr, std::vector<float> *out_anchors) { out_anchors->push_back(x_ctr - 0.5f * (w - 1.0f)); out_anchors->push_back(y_ctr - 0.5f * (h - 1.0f)); out_anchors->push_back(x_ctr + 0.5f * (w - 1.0f)); out_anchors->push_back(y_ctr + 0.5f * (h - 1.0f)); } inline void _Transform(float scale, float ratio, const std::vector<float>& base_anchor, std::vector<float> *out_anchors) { float w = base_anchor[2] - base_anchor[0] + 1.0f; float h = base_anchor[3] - base_anchor[1] + 1.0f; float x_ctr = base_anchor[0] + 0.5 * (w - 1.0f); float y_ctr = base_anchor[1] + 0.5 * (h - 1.0f); float size = w * h; float size_ratios = ::floor(size / ratio); float new_w = ::floor(std::sqrt(size_ratios) + 0.5f) * scale; float new_h = ::floor((new_w / scale * ratio) + 0.5f) * scale; _MakeAnchor(new_w, new_h, x_ctr, y_ctr, out_anchors); } // out_anchors must have shape (n, 5), where n is ratios.size() * scales.size() inline void GenerateAnchors(const std::vector<float>& base_anchor, const nnvm::Tuple<float>& ratios, const nnvm::Tuple<float>& scales, std::vector<float> *out_anchors) { for (size_t j = 0; j < ratios.ndim(); ++j) { for (size_t k = 0; k < scales.ndim(); ++k) { _Transform(scales[k], ratios[j], base_anchor, out_anchors); } } } // greedily keep the max detections __global__ void 
NonMaximumSuppressionSatelliteCu(float* idets, int post_nms_top_n, int num_images, int num_anchors, int width, int height, int max_gts, float* propsout, float* valid_ranges, float* gt_boxes, float* ids, float* dets) { int pre_nms_top_n = 6000; int i = blockIdx.x; int t = threadIdx.x; int chip_anchors = height*width*num_anchors; int multiplier = pre_nms_top_n; int num_threads = blockDim.x; int chip_index = i*chip_anchors; for (int j = t; j < pre_nms_top_n; j = j + num_threads) { dets[6*i*multiplier + 6*j] = idets[chip_index*6 + 6*(int)ids[chip_index + j]]; dets[6*i*multiplier + 6*j+1] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+1]; dets[6*i*multiplier + 6*j+2] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+2]; dets[6*i*multiplier + 6*j+3] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+3]; dets[6*i*multiplier + 6*j+4] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+4]; dets[6*i*multiplier + 6*j+5] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+5]; } __syncthreads(); int vct = 0; // TODO: change the hard code __shared__ int keeps[1000]; //hard coded, sorry chip_index = i*multiplier; for (int j = chip_index; j < chip_index + pre_nms_top_n && vct < post_nms_top_n; j++) { if (dets[6*j+4] == -1) { continue; } float ix1 = dets[6*j]; float iy1 = dets[6*j+1]; float ix2 = dets[6*j+2]; float iy2 = dets[6*j+3]; float iarea = dets[6*j+5]; if (t == 0) { keeps[vct] = j; } vct = vct + 1; float xx1, xx2, yy1, yy2, w, h, inter, ovr; for (int pind = j + 1 + t; pind < chip_index + pre_nms_top_n; pind = pind + num_threads) { if (dets[6*pind + 4] == -1) { continue; } xx1 = fmaxf(ix1, dets[6*pind]); yy1 = fmaxf(iy1, dets[6*pind + 1]); xx2 = fminf(ix2, dets[6*pind + 2]); yy2 = fminf(iy2, dets[6*pind + 3]); w = fmaxf(0.0f, xx2 - xx1 + 1.0f); h = fmaxf(0.0f, yy2 - yy1 + 1.0f); inter = w * h; ovr = inter / (iarea + dets[6*pind+5] - inter); if (ovr > 0.7) { dets[6*pind + 4] = -1; } } __syncthreads(); } //set default values and assign gt boxes if (t < post_nms_top_n) { if (t < vct) { propsout[5*(i*post_nms_top_n + t)] = i; propsout[5*(i*post_nms_top_n + t) + 1] = dets[6*keeps[t]]; propsout[5*(i*post_nms_top_n + t) + 2] = dets[6*keeps[t]+1]; propsout[5*(i*post_nms_top_n + t) + 3] = dets[6*keeps[t]+2]; propsout[5*(i*post_nms_top_n + t) + 4] = dets[6*keeps[t]+3]; } else { propsout[5*(i*post_nms_top_n + t)] = i; propsout[5*(i*post_nms_top_n + t) + 1] = t % 100; propsout[5*(i*post_nms_top_n + t) + 2] = t % 100; propsout[5*(i*post_nms_top_n + t) + 3] = (t % 100) + 200; propsout[5*(i*post_nms_top_n + t) + 4] = (t % 100) + 200; } if (gt_boxes[5*(i*max_gts + t) + 4] != -1 && t < max_gts) { float x1 = gt_boxes[5*(i*max_gts + t)]; float y1 = gt_boxes[5*(i*max_gts + t)+1]; float x2 = gt_boxes[5*(i*max_gts + t)+2]; float y2 = gt_boxes[5*(i*max_gts + t)+3]; float area = (x2 - x1) * (y2 - y1); if (area < valid_ranges[2*i + 1]*valid_ranges[2*i + 1] && area >= valid_ranges[2*i]*valid_ranges[2*i]) { propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 1] = x1; propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 2] = y1; propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 3] = x2; propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 4] = y2; } } } __syncthreads(); } __global__ void getPropsSatelliteCu(float* boxes, float* deltas, float* im_info, float* anchorbuf, float* scores, float* valid_ranges, int num_images, int anchors, int heights, int widths, int stride, float* scorebuf, float* scoreids) { int num_anchors = anchors * heights * widths; int t = blockDim.x * blockIdx.x + threadIdx.x; if (t < 
num_images * num_anchors) { int b = t / num_anchors; int index = t % num_anchors; int a = index / (heights*widths); int mat = index % (heights*widths); int w = mat % widths; //width index int h = mat / widths; //height index boxes[6*t] = anchorbuf[4*a] + w * stride; boxes[6*t + 1] = anchorbuf[4*a+1] + h * stride; boxes[6*t + 2] = anchorbuf[4*a+2] + w * stride; boxes[6*t + 3] = anchorbuf[4*a+3] + h * stride; boxes[6*t + 4] = scores[b*num_anchors*2 + ((anchors + a)*heights + h)*widths + w]; float width = boxes[6*t + 2] - boxes[6*t] + 1.0; float height = boxes[6*t + 3] - boxes[6*t + 1] + 1.0; float ctr_x = boxes[6*t + 0] + 0.5 * (width - 1.0); float ctr_y = boxes[6*t + 1] + 0.5 * (height - 1.0); float dx = deltas[b*num_anchors*4 + a*4*widths*heights + h*widths + w]; float dy = deltas[b*num_anchors*4 + (a*4 + 1)*widths*heights + h*widths + w]; float dw = deltas[b*num_anchors*4 + (a*4 + 2)*widths*heights + h*widths + w]; float dh = deltas[b*num_anchors*4 + (a*4 + 3)*widths*heights + h*widths + w]; float pred_ctr_x = dx * width + ctr_x; float pred_ctr_y = dy * height + ctr_y; float pred_w = exp(dw) * width; float pred_h = exp(dh) * height; float pred_x1 = pred_ctr_x - 0.5 * (pred_w - 1.0); float pred_y1 = pred_ctr_y - 0.5 * (pred_h - 1.0); float pred_x2 = pred_ctr_x + 0.5 * (pred_w - 1.0); float pred_y2 = pred_ctr_y + 0.5 * (pred_h - 1.0); pred_x1 = fmaxf(fminf(pred_x1, im_info[3*b+1] - 1.0f), 0.0f); pred_y1 = fmaxf(fminf(pred_y1, im_info[3*b] - 1.0f), 0.0f); pred_x2 = fmaxf(fminf(pred_x2, im_info[3*b+1] - 1.0f), 0.0f); pred_y2 = fmaxf(fminf(pred_y2, im_info[3*b] - 1.0f), 0.0f); boxes[6*t] = pred_x1; boxes[6*t + 1] = pred_y1; boxes[6*t + 2] = pred_x2; boxes[6*t + 3] = pred_y2; int min_size = 3; if ((pred_y2 - pred_y1) < min_size && (pred_x2 - pred_x1) < min_size) { boxes[6*t] -= min_size/2; boxes[6*t + 1] -= min_size/2; boxes[6*t + 2] += min_size/2; boxes[6*t + 3] += min_size/2; boxes[6*t + 4] = -1; } float area = (boxes[6*t + 2] - boxes[6*t]) * (boxes[6*t + 3] - boxes[6*t + 1]); if (area >= valid_ranges[2*b+1] * valid_ranges[2*b+1] || area < valid_ranges[2*b]*valid_ranges[2*b]) { boxes[6*t + 4] = -1; } boxes[6*t + 5] = area; scorebuf[t] = boxes[6*t + 4]; scoreids[t] = index; } } } // namespace utils template<typename xpu> class MultiProposalTargetMaskSatelliteGPUOp : public Operator{ public: float *labels; float *bbox_targets; float *bbox_weights; float *crois; float *gt_boxes; float *out_pos_boxes; float *out_pos_ids; explicit MultiProposalTargetMaskSatelliteGPUOp(MultiProposalTargetMaskSatelliteParam param) { this->param_ = param; this->param_.workspace = (param_.workspace << 20) / sizeof(float); // TODO: change the hard code this->crois = new float[1000*param.batch_size*5]; this->labels = new float[1000*param.batch_size]; this->gt_boxes = new float[param.max_gts*param.batch_size*5]; this->bbox_targets = new float[1000*param.batch_size*4]; this->bbox_weights = new float[1000*param.batch_size*4]; this->out_pos_boxes = new float[param.max_masks*param.batch_size*5]; this->out_pos_ids = new float[param.max_masks*param.batch_size]; } virtual void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data, const std::vector<TBlob> &aux_states) { CHECK_EQ(in_data.size(), 5); CHECK_EQ(out_data.size(), 6); using namespace mshadow; using namespace mshadow::expr; //clock_t t; //t = clock(); Stream<gpu> *s = ctx.get_stream<gpu>(); Tensor<gpu, 4> tscores = in_data[proposal::kClsProb].get<gpu, 4, real_t>(s); Tensor<gpu, 4> 
tbbox_deltas = in_data[proposal::kBBoxPred].get<gpu, 4, real_t>(s); Tensor<gpu, 2> tim_info = in_data[proposal::kImInfo].get<gpu, 2, real_t>(s); Tensor<gpu, 3> tgt_boxes = in_data[proposal::kGTBoxes].get<gpu, 3, real_t>(s); Tensor<gpu, 2> tvalid_ranges = in_data[proposal::kValidRanges].get<gpu, 2, real_t>(s); int max_gts = param_.max_gts; Tensor<gpu, 2> rois = out_data[proposal::kRoIs].get<gpu, 2, real_t>(s); int rpn_post_nms_top_n = param_.rpn_post_nms_top_n; int num_images = tbbox_deltas.size(0); int num_anchors = tbbox_deltas.size(1) / 4; int height = tbbox_deltas.size(2); int width = tbbox_deltas.size(3); int count_anchors = num_anchors*height*width; int total_anchors = count_anchors * num_images; int bufsize = (total_anchors*8 + num_images*rpn_post_nms_top_n*5 + num_anchors*4)*sizeof(float); Tensor<gpu, 1> workspace = ctx.requested[proposal::kTempSpace].get_space_typed<gpu, 1, float>(Shape1(bufsize), s); int pre_nms_top_n = 6000; float* propbuf = workspace.dptr_; float* scorebuf = workspace.dptr_ + total_anchors*6; float* idbuf = workspace.dptr_ + total_anchors*7; float* detbuf = workspace.dptr_ + total_anchors*8; float* anchorbuf = workspace.dptr_ + total_anchors*8 + num_images * 6 * pre_nms_top_n; std::vector<float> base_anchor(4); //usleep(20000000); base_anchor[0] = 0.0; base_anchor[1] = 0.0; base_anchor[2] = param_.feature_stride - 1.0; base_anchor[3] = param_.feature_stride - 1.0; std::vector<float> anchors; utils::GenerateAnchors(base_anchor, param_.ratios, param_.scales, &anchors); unsigned int size = num_anchors*4*sizeof(float); hipMemcpy(anchorbuf, &anchors[0], size, hipMemcpyHostToDevice); //call cuda kernel int threadsPerBlock = NUM_THREADS_NMS; int numblocks = (total_anchors/threadsPerBlock) + 1; hipLaunchKernelGGL(( utils::getPropsSatelliteCu), dim3(numblocks), dim3(threadsPerBlock), 0, 0, propbuf, tbbox_deltas.dptr_, tim_info.dptr_, anchorbuf, tscores.dptr_, tvalid_ranges.dptr_, num_images, num_anchors, height, width, param_.feature_stride, scorebuf, idbuf); std::vector <float> tmp(total_anchors); std::vector<float> ids(total_anchors); hipDeviceSynchronize(); hipMemcpy(&tmp[0], scorebuf, sizeof(float) * num_images * count_anchors, hipMemcpyDeviceToHost); #pragma omp parallel for num_threads(8) for (int i = 0; i < total_anchors; i++) { ids[i] = (float)(i % count_anchors); } #pragma omp parallel for num_threads(8) for (int i = 0; i < num_images; i++) { float basep = count_anchors*i; std::sort(ids.begin() + i*count_anchors, ids.begin() + (i+1)*count_anchors, [&tmp, basep](float i1, float i2) { return tmp[(int)i1 + basep] > tmp[(int)i2 + basep]; }); } hipMemcpy(idbuf, &ids[0], sizeof(float) * num_images * count_anchors, hipMemcpyHostToDevice); hipLaunchKernelGGL(( utils::NonMaximumSuppressionSatelliteCu), dim3(num_images), dim3(threadsPerBlock), 0, 0, propbuf, rpn_post_nms_top_n, num_images, num_anchors, width, height, max_gts, rois.dptr_, tvalid_ranges.dptr_, tgt_boxes.dptr_, idbuf, detbuf); hipDeviceSynchronize(); hipError_t error; error = hipGetLastError(); if(error != hipSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", hipGetErrorString(error)); exit(-1); } hipMemcpy(crois, rois.dptr_, 5*rpn_post_nms_top_n*num_images*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(gt_boxes, tgt_boxes.dptr_, 5*max_gts*num_images*sizeof(float), hipMemcpyDeviceToHost); std::vector <int> numgts_per_image(num_images); for (int i = 0; i < num_images; i++) { numgts_per_image[i] = 0; for (int j = 0; j < max_gts; j++) { if (gt_boxes[i*max_gts*5 + j*5 + 4] != -1) { 
numgts_per_image[i]++; } } } #pragma omp parallel for num_threads(8) for (int i = 0; i < num_images; i++) { for (int j = 0; j < rpn_post_nms_top_n; j++) { int basepos = rpn_post_nms_top_n*i + j; labels[basepos] = 0; bbox_targets[4*basepos] = 1.0; bbox_targets[4*basepos + 1] = 1.0; bbox_targets[4*basepos + 2] = 1.0; bbox_targets[4*basepos + 3] = 1.0; bbox_weights[4*basepos] = 0.0; bbox_weights[4*basepos + 1] = 0.0; bbox_weights[4*basepos + 2] = 0.0; bbox_weights[4*basepos + 3] = 0.0; } } float *maxids = new float[num_images*rpn_post_nms_top_n]; for (int i = 0; i < num_images*rpn_post_nms_top_n; i++) { maxids[i] = -1; } #pragma omp parallel for num_threads(8) for (int imid = 0; imid < num_images; imid++) { int tpct = 0; int num_gts_this_image = numgts_per_image[imid]; //std::cout << "gtc " << num_gts_this_image << std::endl; int props_this_batch = rpn_post_nms_top_n; if (num_gts_this_image > 0) { float *overlaps = new float[props_this_batch * num_gts_this_image]; float *max_overlaps = new float[props_this_batch]; for (int i = 0; i < props_this_batch; i++) { max_overlaps[i] = 0; } float *max_overlap_ids = new float[props_this_batch]; std::set <int> positive_label_ids; for (int i = 0; i < props_this_batch; i++) { max_overlap_ids[i] = 0; } for (int i = props_this_batch; i < rpn_post_nms_top_n; i++) { labels[imid*rpn_post_nms_top_n + i] = -1; } //get overlaps, maximum overlaps and gt labels for (int i = 0; i < numgts_per_image[imid]; i++) { float x1 = gt_boxes[imid*5*max_gts + i*5]; float y1 = gt_boxes[imid*5*max_gts + i*5 + 1]; float x2 = gt_boxes[imid*5*max_gts + i*5 + 2]; float y2 = gt_boxes[imid*5*max_gts + i*5 + 3]; int pbase; float a1 = (x2 - x1) * (y2 - y1); float xx1, yy1, xx2, yy2, w, h, inter, ovr, a2; for (int j = 0; j < props_this_batch; j++) { pbase = rpn_post_nms_top_n*imid + j; xx1 = ::max(x1, crois[pbase*5 + 1]); yy1 = ::max(y1, crois[pbase*5 + 2]); xx2 = ::min(x2, crois[pbase*5 + 3]); yy2 = ::min(y2, crois[pbase*5 + 4]); w = ::max(0.0f, xx2 - xx1 + 1.0f); h = ::max(0.0f, yy2 - yy1 + 1.0f); a2 = (crois[pbase*5 + 3] - crois[pbase*5 + 1]) * (crois[pbase*5 + 4] - crois[pbase*5 + 2]); inter = w * h; ovr = inter / (a1 + a2 - inter); overlaps[i*num_gts_this_image + j] = ovr; if (overlaps[i*num_gts_this_image + j] > max_overlaps[j] && overlaps[i*num_gts_this_image + j] > 0.5) { max_overlaps[j] = overlaps[i*num_gts_this_image + j]; max_overlap_ids[j] = i; //set labels for positive proposals labels[imid*rpn_post_nms_top_n + j] = gt_boxes[imid*5*max_gts + i*5 + 4]; positive_label_ids.insert(j); tpct = tpct + 1; } } } //p is for proposal and g is for gt, cx is x center and w,h is width and height int pid, gtid; float gx1, gx2, gy1, gy2, px1, px2, py1, py2; float gcx, gcy, gw, gh, pcx, pcy, pw, ph; //generate bbox targets for the positive labels for (auto it = positive_label_ids.begin(); it !=positive_label_ids.end(); it++) { pid = *it; int baseid = (imid*rpn_post_nms_top_n + pid); bbox_weights[baseid*4] = 1; bbox_weights[baseid*4+1] = 1; bbox_weights[baseid*4+2] = 1; bbox_weights[baseid*4+3] = 1; gtid = max_overlap_ids[pid]; maxids[baseid] = gtid; gx1 = gt_boxes[imid*5*max_gts + gtid*5]; gy1 = gt_boxes[imid*5*max_gts + gtid*5 + 1]; gx2 = gt_boxes[imid*5*max_gts + gtid*5 + 2]; gy2 = gt_boxes[imid*5*max_gts + gtid*5 + 3]; gw = gx2 - gx1 + 1; gh = gy2 - gy1 + 1; gcx = gx1 + gw*0.5; gcy = gy1 + gh*0.5; px1 = crois[baseid*5 + 1]; py1 = crois[baseid*5 + 2]; px2 = crois[baseid*5 + 3]; py2 = crois[baseid*5 + 4]; pw = px2 - px1 + 1; ph = py2 - py1 + 1; pcx = px1 + (pw-1)*0.5; pcy = py1 + (ph-1)*0.5; 
bbox_targets[4*baseid] = 10 * (gcx - pcx) / (pw + 1e-7); bbox_targets[4*baseid+1] = 10 * (gcy - pcy) / (ph + 1e-7); bbox_targets[4*baseid+2] = 5 * log(gw/(pw + 1e-7)); bbox_targets[4*baseid+3] = 5 * log(gh/(ph + 1e-7)); } delete [] max_overlap_ids; delete [] overlaps; delete [] max_overlaps; } } int mask_ct = 0; // TODO: change hard code for (int i = 0; i < num_images*1000; i++) { if (labels[i] > 0 and mask_ct < num_images * param_.max_masks) { out_pos_boxes[5*mask_ct] = crois[5*i]; out_pos_boxes[5*mask_ct+1] = crois[5*i+1]; out_pos_boxes[5*mask_ct+2] = crois[5*i+2]; out_pos_boxes[5*mask_ct+3] = crois[5*i+3]; out_pos_boxes[5*mask_ct+4] = crois[5*i+4]; out_pos_ids[mask_ct] = maxids[i]; mask_ct++; } } for (int i = mask_ct; i < num_images*param_.max_masks; i++) { out_pos_boxes[5*i] = i % num_images; out_pos_boxes[5*i+1] = i % 200; out_pos_boxes[5*i+2] = i % 200; out_pos_boxes[5*i+3] = i % 200 + 100; out_pos_boxes[5*i+4] = i % 200 + 100; out_pos_ids[i] = -1; } delete [] maxids; Stream<gpu> *so = ctx.get_stream<gpu>(); Tensor<gpu, 2> olabels = out_data[proposal::kLabels].get<gpu, 2, real_t>(so); Tensor<gpu, 2> obbox_targets = out_data[proposal::kBboxTarget].get<gpu, 2, real_t>(so); Tensor<gpu, 2> obbox_weights = out_data[proposal::kBboxWeight].get<gpu, 2, real_t>(so); Tensor<gpu, 2> omaskrois = out_data[proposal::kMaskRoIs].get<gpu, 2, real_t>(s); Tensor<gpu, 2> omaskids = out_data[proposal::kMaskIds].get<gpu, 2, real_t>(s); hipMemcpy(omaskrois.dptr_, out_pos_boxes, sizeof(float) * num_images * param_.max_masks * 5, hipMemcpyHostToDevice); hipMemcpy(omaskids.dptr_, out_pos_ids, sizeof(float) * num_images * param_.max_masks, hipMemcpyHostToDevice); // TODO: change hard code hipMemcpy(olabels.dptr_, labels, sizeof(float) * num_images*1000, hipMemcpyHostToDevice); hipMemcpy(obbox_targets.dptr_, bbox_targets, 4*sizeof(float) * num_images*1000, hipMemcpyHostToDevice); hipMemcpy(obbox_weights.dptr_, bbox_weights, 4*sizeof(float) * num_images*1000, hipMemcpyHostToDevice); } virtual void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &aux_states) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(in_grad.size(), 5); Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s); Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s); Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s); Tensor<xpu, 3> ggt_boxes = in_grad[proposal::kGTBoxes].get<xpu, 3, real_t>(s); Tensor<xpu, 2> gvalid_ranges = in_grad[proposal::kValidRanges].get<xpu, 2, real_t>(s); // can not assume the grad would be zero Assign(gscores, req[proposal::kClsProb], 0); Assign(gbbox, req[proposal::kBBoxPred], 0); Assign(ginfo, req[proposal::kImInfo], 0); Assign(ggt_boxes, req[proposal::kGTBoxes], 0); Assign(gvalid_ranges, req[proposal::kValidRanges], 0); } private: MultiProposalTargetMaskSatelliteParam param_; }; // class MultiProposalOp template<> Operator *CreateOp<gpu>(MultiProposalTargetMaskSatelliteParam param) { return new MultiProposalTargetMaskSatelliteGPUOp<gpu>(param); } } // namespace op } // namespace mxnet
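/*
 * Both the NMS kernel and the CPU target-assignment loop above compute the
 * same intersection-over-union expression inline. A standalone sketch of that
 * computation for boxes given as [x1, y1, x2, y2]; the file mixes the +1 and
 * no-+1 area conventions, and this sketch uses the +1 form throughout.
 * box_iou is an illustrative helper name, not part of the file above.
 */
__host__ __device__ inline float box_iou(const float a[4], const float b[4]) {
    float xx1 = fmaxf(a[0], b[0]);
    float yy1 = fmaxf(a[1], b[1]);
    float xx2 = fminf(a[2], b[2]);
    float yy2 = fminf(a[3], b[3]);
    float w = fmaxf(0.0f, xx2 - xx1 + 1.0f);
    float h = fmaxf(0.0f, yy2 - yy1 + 1.0f);
    float inter = w * h;
    float area_a = (a[2] - a[0] + 1.0f) * (a[3] - a[1] + 1.0f);
    float area_b = (b[2] - b[0] + 1.0f) * (b[3] - b[1] + 1.0f);
    return inter / (area_a + area_b - inter);
}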
634558c59980489fcc6d086b5b82d4d1ad9042b7.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2018 University of Maryland, College Park * Licensed under The Apache-2.0 License [see LICENSE for details] * \file multi_proposal_target.cc * \brief Proposal target layer * \author Bharat Singh */ #include "./multi_proposal_target_mask_satellite-inl.h" #include <set> #include <math.h> #include <unistd.h> #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include "./operator_common.h" #include "./mshadow_op.h" #include <time.h> #include <stdlib.h> //============================ // Bounding Box Transform Utils //============================ #define NUM_THREADS_NMS 1024 namespace mxnet { namespace op { namespace utils { // filter box by set confidence to zero // * height or width < rpn_min_size inline void FilterBox(float *dets, int num_dets, float min_size) { #pragma omp parallel for num_threads(8) for (int i = 0; i < num_dets; ++i) { float iw = dets[5*i + 2] - dets[5*i] + 1.0f; float ih = dets[5*i + 3] - dets[5*i + 1] + 1.0f; if (iw < min_size || ih < min_size) { dets[5*i+0] -= min_size / 2; dets[5*i+1] -= min_size / 2; dets[5*i+2] += min_size / 2; dets[5*i+3] += min_size / 2; dets[5*i+4] = -1.0f; } } } inline void _MakeAnchor(float w, float h, float x_ctr, float y_ctr, std::vector<float> *out_anchors) { out_anchors->push_back(x_ctr - 0.5f * (w - 1.0f)); out_anchors->push_back(y_ctr - 0.5f * (h - 1.0f)); out_anchors->push_back(x_ctr + 0.5f * (w - 1.0f)); out_anchors->push_back(y_ctr + 0.5f * (h - 1.0f)); } inline void _Transform(float scale, float ratio, const std::vector<float>& base_anchor, std::vector<float> *out_anchors) { float w = base_anchor[2] - base_anchor[0] + 1.0f; float h = base_anchor[3] - base_anchor[1] + 1.0f; float x_ctr = base_anchor[0] + 0.5 * (w - 1.0f); float y_ctr = base_anchor[1] + 0.5 * (h - 1.0f); float size = w * h; float size_ratios = std::floor(size / ratio); float new_w = std::floor(std::sqrt(size_ratios) + 0.5f) * scale; float new_h = std::floor((new_w / scale * ratio) + 0.5f) * scale; _MakeAnchor(new_w, new_h, x_ctr, y_ctr, out_anchors); } // out_anchors must have shape (n, 5), where n is ratios.size() * scales.size() inline void GenerateAnchors(const std::vector<float>& base_anchor, const nnvm::Tuple<float>& ratios, const nnvm::Tuple<float>& scales, std::vector<float> *out_anchors) { for (size_t j = 0; j < ratios.ndim(); ++j) { for (size_t k = 0; k < scales.ndim(); ++k) { _Transform(scales[k], ratios[j], base_anchor, out_anchors); } } } // greedily keep the max detections __global__ void NonMaximumSuppressionSatelliteCu(float* idets, int post_nms_top_n, int num_images, int num_anchors, 
int width, int height, int max_gts, float* propsout, float* valid_ranges, float* gt_boxes, float* ids, float* dets) { int pre_nms_top_n = 6000; int i = blockIdx.x; int t = threadIdx.x; int chip_anchors = height*width*num_anchors; int multiplier = pre_nms_top_n; int num_threads = blockDim.x; int chip_index = i*chip_anchors; for (int j = t; j < pre_nms_top_n; j = j + num_threads) { dets[6*i*multiplier + 6*j] = idets[chip_index*6 + 6*(int)ids[chip_index + j]]; dets[6*i*multiplier + 6*j+1] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+1]; dets[6*i*multiplier + 6*j+2] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+2]; dets[6*i*multiplier + 6*j+3] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+3]; dets[6*i*multiplier + 6*j+4] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+4]; dets[6*i*multiplier + 6*j+5] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+5]; } __syncthreads(); int vct = 0; // TODO: change the hard code __shared__ int keeps[1000]; //hard coded, sorry chip_index = i*multiplier; for (int j = chip_index; j < chip_index + pre_nms_top_n && vct < post_nms_top_n; j++) { if (dets[6*j+4] == -1) { continue; } float ix1 = dets[6*j]; float iy1 = dets[6*j+1]; float ix2 = dets[6*j+2]; float iy2 = dets[6*j+3]; float iarea = dets[6*j+5]; if (t == 0) { keeps[vct] = j; } vct = vct + 1; float xx1, xx2, yy1, yy2, w, h, inter, ovr; for (int pind = j + 1 + t; pind < chip_index + pre_nms_top_n; pind = pind + num_threads) { if (dets[6*pind + 4] == -1) { continue; } xx1 = fmaxf(ix1, dets[6*pind]); yy1 = fmaxf(iy1, dets[6*pind + 1]); xx2 = fminf(ix2, dets[6*pind + 2]); yy2 = fminf(iy2, dets[6*pind + 3]); w = fmaxf(0.0f, xx2 - xx1 + 1.0f); h = fmaxf(0.0f, yy2 - yy1 + 1.0f); inter = w * h; ovr = inter / (iarea + dets[6*pind+5] - inter); if (ovr > 0.7) { dets[6*pind + 4] = -1; } } __syncthreads(); } //set default values and assign gt boxes if (t < post_nms_top_n) { if (t < vct) { propsout[5*(i*post_nms_top_n + t)] = i; propsout[5*(i*post_nms_top_n + t) + 1] = dets[6*keeps[t]]; propsout[5*(i*post_nms_top_n + t) + 2] = dets[6*keeps[t]+1]; propsout[5*(i*post_nms_top_n + t) + 3] = dets[6*keeps[t]+2]; propsout[5*(i*post_nms_top_n + t) + 4] = dets[6*keeps[t]+3]; } else { propsout[5*(i*post_nms_top_n + t)] = i; propsout[5*(i*post_nms_top_n + t) + 1] = t % 100; propsout[5*(i*post_nms_top_n + t) + 2] = t % 100; propsout[5*(i*post_nms_top_n + t) + 3] = (t % 100) + 200; propsout[5*(i*post_nms_top_n + t) + 4] = (t % 100) + 200; } if (gt_boxes[5*(i*max_gts + t) + 4] != -1 && t < max_gts) { float x1 = gt_boxes[5*(i*max_gts + t)]; float y1 = gt_boxes[5*(i*max_gts + t)+1]; float x2 = gt_boxes[5*(i*max_gts + t)+2]; float y2 = gt_boxes[5*(i*max_gts + t)+3]; float area = (x2 - x1) * (y2 - y1); if (area < valid_ranges[2*i + 1]*valid_ranges[2*i + 1] && area >= valid_ranges[2*i]*valid_ranges[2*i]) { propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 1] = x1; propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 2] = y1; propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 3] = x2; propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 4] = y2; } } } __syncthreads(); } __global__ void getPropsSatelliteCu(float* boxes, float* deltas, float* im_info, float* anchorbuf, float* scores, float* valid_ranges, int num_images, int anchors, int heights, int widths, int stride, float* scorebuf, float* scoreids) { int num_anchors = anchors * heights * widths; int t = blockDim.x * blockIdx.x + threadIdx.x; if (t < num_images * num_anchors) { int b = t / num_anchors; int index = t % num_anchors; int a = index / 
(heights*widths); int mat = index % (heights*widths); int w = mat % widths; //width index int h = mat / widths; //height index boxes[6*t] = anchorbuf[4*a] + w * stride; boxes[6*t + 1] = anchorbuf[4*a+1] + h * stride; boxes[6*t + 2] = anchorbuf[4*a+2] + w * stride; boxes[6*t + 3] = anchorbuf[4*a+3] + h * stride; boxes[6*t + 4] = scores[b*num_anchors*2 + ((anchors + a)*heights + h)*widths + w]; float width = boxes[6*t + 2] - boxes[6*t] + 1.0; float height = boxes[6*t + 3] - boxes[6*t + 1] + 1.0; float ctr_x = boxes[6*t + 0] + 0.5 * (width - 1.0); float ctr_y = boxes[6*t + 1] + 0.5 * (height - 1.0); float dx = deltas[b*num_anchors*4 + a*4*widths*heights + h*widths + w]; float dy = deltas[b*num_anchors*4 + (a*4 + 1)*widths*heights + h*widths + w]; float dw = deltas[b*num_anchors*4 + (a*4 + 2)*widths*heights + h*widths + w]; float dh = deltas[b*num_anchors*4 + (a*4 + 3)*widths*heights + h*widths + w]; float pred_ctr_x = dx * width + ctr_x; float pred_ctr_y = dy * height + ctr_y; float pred_w = exp(dw) * width; float pred_h = exp(dh) * height; float pred_x1 = pred_ctr_x - 0.5 * (pred_w - 1.0); float pred_y1 = pred_ctr_y - 0.5 * (pred_h - 1.0); float pred_x2 = pred_ctr_x + 0.5 * (pred_w - 1.0); float pred_y2 = pred_ctr_y + 0.5 * (pred_h - 1.0); pred_x1 = fmaxf(fminf(pred_x1, im_info[3*b+1] - 1.0f), 0.0f); pred_y1 = fmaxf(fminf(pred_y1, im_info[3*b] - 1.0f), 0.0f); pred_x2 = fmaxf(fminf(pred_x2, im_info[3*b+1] - 1.0f), 0.0f); pred_y2 = fmaxf(fminf(pred_y2, im_info[3*b] - 1.0f), 0.0f); boxes[6*t] = pred_x1; boxes[6*t + 1] = pred_y1; boxes[6*t + 2] = pred_x2; boxes[6*t + 3] = pred_y2; int min_size = 3; if ((pred_y2 - pred_y1) < min_size && (pred_x2 - pred_x1) < min_size) { boxes[6*t] -= min_size/2; boxes[6*t + 1] -= min_size/2; boxes[6*t + 2] += min_size/2; boxes[6*t + 3] += min_size/2; boxes[6*t + 4] = -1; } float area = (boxes[6*t + 2] - boxes[6*t]) * (boxes[6*t + 3] - boxes[6*t + 1]); if (area >= valid_ranges[2*b+1] * valid_ranges[2*b+1] || area < valid_ranges[2*b]*valid_ranges[2*b]) { boxes[6*t + 4] = -1; } boxes[6*t + 5] = area; scorebuf[t] = boxes[6*t + 4]; scoreids[t] = index; } } } // namespace utils template<typename xpu> class MultiProposalTargetMaskSatelliteGPUOp : public Operator{ public: float *labels; float *bbox_targets; float *bbox_weights; float *crois; float *gt_boxes; float *out_pos_boxes; float *out_pos_ids; explicit MultiProposalTargetMaskSatelliteGPUOp(MultiProposalTargetMaskSatelliteParam param) { this->param_ = param; this->param_.workspace = (param_.workspace << 20) / sizeof(float); // TODO: change the hard code this->crois = new float[1000*param.batch_size*5]; this->labels = new float[1000*param.batch_size]; this->gt_boxes = new float[param.max_gts*param.batch_size*5]; this->bbox_targets = new float[1000*param.batch_size*4]; this->bbox_weights = new float[1000*param.batch_size*4]; this->out_pos_boxes = new float[param.max_masks*param.batch_size*5]; this->out_pos_ids = new float[param.max_masks*param.batch_size]; } virtual void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data, const std::vector<TBlob> &aux_states) { CHECK_EQ(in_data.size(), 5); CHECK_EQ(out_data.size(), 6); using namespace mshadow; using namespace mshadow::expr; //clock_t t; //t = clock(); Stream<gpu> *s = ctx.get_stream<gpu>(); Tensor<gpu, 4> tscores = in_data[proposal::kClsProb].get<gpu, 4, real_t>(s); Tensor<gpu, 4> tbbox_deltas = in_data[proposal::kBBoxPred].get<gpu, 4, real_t>(s); Tensor<gpu, 2> tim_info = 
in_data[proposal::kImInfo].get<gpu, 2, real_t>(s); Tensor<gpu, 3> tgt_boxes = in_data[proposal::kGTBoxes].get<gpu, 3, real_t>(s); Tensor<gpu, 2> tvalid_ranges = in_data[proposal::kValidRanges].get<gpu, 2, real_t>(s); int max_gts = param_.max_gts; Tensor<gpu, 2> rois = out_data[proposal::kRoIs].get<gpu, 2, real_t>(s); int rpn_post_nms_top_n = param_.rpn_post_nms_top_n; int num_images = tbbox_deltas.size(0); int num_anchors = tbbox_deltas.size(1) / 4; int height = tbbox_deltas.size(2); int width = tbbox_deltas.size(3); int count_anchors = num_anchors*height*width; int total_anchors = count_anchors * num_images; int bufsize = (total_anchors*8 + num_images*rpn_post_nms_top_n*5 + num_anchors*4)*sizeof(float); Tensor<gpu, 1> workspace = ctx.requested[proposal::kTempSpace].get_space_typed<gpu, 1, float>(Shape1(bufsize), s); int pre_nms_top_n = 6000; float* propbuf = workspace.dptr_; float* scorebuf = workspace.dptr_ + total_anchors*6; float* idbuf = workspace.dptr_ + total_anchors*7; float* detbuf = workspace.dptr_ + total_anchors*8; float* anchorbuf = workspace.dptr_ + total_anchors*8 + num_images * 6 * pre_nms_top_n; std::vector<float> base_anchor(4); //usleep(20000000); base_anchor[0] = 0.0; base_anchor[1] = 0.0; base_anchor[2] = param_.feature_stride - 1.0; base_anchor[3] = param_.feature_stride - 1.0; std::vector<float> anchors; utils::GenerateAnchors(base_anchor, param_.ratios, param_.scales, &anchors); unsigned int size = num_anchors*4*sizeof(float); cudaMemcpy(anchorbuf, &anchors[0], size, cudaMemcpyHostToDevice); //call cuda kernel int threadsPerBlock = NUM_THREADS_NMS; int numblocks = (total_anchors/threadsPerBlock) + 1; utils::getPropsSatelliteCu<<<numblocks, threadsPerBlock>>>(propbuf, tbbox_deltas.dptr_, tim_info.dptr_, anchorbuf, tscores.dptr_, tvalid_ranges.dptr_, num_images, num_anchors, height, width, param_.feature_stride, scorebuf, idbuf); std::vector <float> tmp(total_anchors); std::vector<float> ids(total_anchors); cudaDeviceSynchronize(); cudaMemcpy(&tmp[0], scorebuf, sizeof(float) * num_images * count_anchors, cudaMemcpyDeviceToHost); #pragma omp parallel for num_threads(8) for (int i = 0; i < total_anchors; i++) { ids[i] = (float)(i % count_anchors); } #pragma omp parallel for num_threads(8) for (int i = 0; i < num_images; i++) { float basep = count_anchors*i; std::sort(ids.begin() + i*count_anchors, ids.begin() + (i+1)*count_anchors, [&tmp, basep](float i1, float i2) { return tmp[(int)i1 + basep] > tmp[(int)i2 + basep]; }); } cudaMemcpy(idbuf, &ids[0], sizeof(float) * num_images * count_anchors, cudaMemcpyHostToDevice); utils::NonMaximumSuppressionSatelliteCu<<<num_images, threadsPerBlock>>>(propbuf, rpn_post_nms_top_n, num_images, num_anchors, width, height, max_gts, rois.dptr_, tvalid_ranges.dptr_, tgt_boxes.dptr_, idbuf, detbuf); cudaDeviceSynchronize(); cudaError_t error; error = cudaGetLastError(); if(error != cudaSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", cudaGetErrorString(error)); exit(-1); } cudaMemcpy(crois, rois.dptr_, 5*rpn_post_nms_top_n*num_images*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(gt_boxes, tgt_boxes.dptr_, 5*max_gts*num_images*sizeof(float), cudaMemcpyDeviceToHost); std::vector <int> numgts_per_image(num_images); for (int i = 0; i < num_images; i++) { numgts_per_image[i] = 0; for (int j = 0; j < max_gts; j++) { if (gt_boxes[i*max_gts*5 + j*5 + 4] != -1) { numgts_per_image[i]++; } } } #pragma omp parallel for num_threads(8) for (int i = 0; i < num_images; i++) { for (int j = 0; j < rpn_post_nms_top_n; j++) 
{ int basepos = rpn_post_nms_top_n*i + j; labels[basepos] = 0; bbox_targets[4*basepos] = 1.0; bbox_targets[4*basepos + 1] = 1.0; bbox_targets[4*basepos + 2] = 1.0; bbox_targets[4*basepos + 3] = 1.0; bbox_weights[4*basepos] = 0.0; bbox_weights[4*basepos + 1] = 0.0; bbox_weights[4*basepos + 2] = 0.0; bbox_weights[4*basepos + 3] = 0.0; } } float *maxids = new float[num_images*rpn_post_nms_top_n]; for (int i = 0; i < num_images*rpn_post_nms_top_n; i++) { maxids[i] = -1; } #pragma omp parallel for num_threads(8) for (int imid = 0; imid < num_images; imid++) { int tpct = 0; int num_gts_this_image = numgts_per_image[imid]; //std::cout << "gtc " << num_gts_this_image << std::endl; int props_this_batch = rpn_post_nms_top_n; if (num_gts_this_image > 0) { float *overlaps = new float[props_this_batch * num_gts_this_image]; float *max_overlaps = new float[props_this_batch]; for (int i = 0; i < props_this_batch; i++) { max_overlaps[i] = 0; } float *max_overlap_ids = new float[props_this_batch]; std::set <int> positive_label_ids; for (int i = 0; i < props_this_batch; i++) { max_overlap_ids[i] = 0; } for (int i = props_this_batch; i < rpn_post_nms_top_n; i++) { labels[imid*rpn_post_nms_top_n + i] = -1; } //get overlaps, maximum overlaps and gt labels for (int i = 0; i < numgts_per_image[imid]; i++) { float x1 = gt_boxes[imid*5*max_gts + i*5]; float y1 = gt_boxes[imid*5*max_gts + i*5 + 1]; float x2 = gt_boxes[imid*5*max_gts + i*5 + 2]; float y2 = gt_boxes[imid*5*max_gts + i*5 + 3]; int pbase; float a1 = (x2 - x1) * (y2 - y1); float xx1, yy1, xx2, yy2, w, h, inter, ovr, a2; for (int j = 0; j < props_this_batch; j++) { pbase = rpn_post_nms_top_n*imid + j; xx1 = std::max(x1, crois[pbase*5 + 1]); yy1 = std::max(y1, crois[pbase*5 + 2]); xx2 = std::min(x2, crois[pbase*5 + 3]); yy2 = std::min(y2, crois[pbase*5 + 4]); w = std::max(0.0f, xx2 - xx1 + 1.0f); h = std::max(0.0f, yy2 - yy1 + 1.0f); a2 = (crois[pbase*5 + 3] - crois[pbase*5 + 1]) * (crois[pbase*5 + 4] - crois[pbase*5 + 2]); inter = w * h; ovr = inter / (a1 + a2 - inter); overlaps[i*num_gts_this_image + j] = ovr; if (overlaps[i*num_gts_this_image + j] > max_overlaps[j] && overlaps[i*num_gts_this_image + j] > 0.5) { max_overlaps[j] = overlaps[i*num_gts_this_image + j]; max_overlap_ids[j] = i; //set labels for positive proposals labels[imid*rpn_post_nms_top_n + j] = gt_boxes[imid*5*max_gts + i*5 + 4]; positive_label_ids.insert(j); tpct = tpct + 1; } } } //p is for proposal and g is for gt, cx is x center and w,h is width and height int pid, gtid; float gx1, gx2, gy1, gy2, px1, px2, py1, py2; float gcx, gcy, gw, gh, pcx, pcy, pw, ph; //generate bbox targets for the positive labels for (auto it = positive_label_ids.begin(); it !=positive_label_ids.end(); it++) { pid = *it; int baseid = (imid*rpn_post_nms_top_n + pid); bbox_weights[baseid*4] = 1; bbox_weights[baseid*4+1] = 1; bbox_weights[baseid*4+2] = 1; bbox_weights[baseid*4+3] = 1; gtid = max_overlap_ids[pid]; maxids[baseid] = gtid; gx1 = gt_boxes[imid*5*max_gts + gtid*5]; gy1 = gt_boxes[imid*5*max_gts + gtid*5 + 1]; gx2 = gt_boxes[imid*5*max_gts + gtid*5 + 2]; gy2 = gt_boxes[imid*5*max_gts + gtid*5 + 3]; gw = gx2 - gx1 + 1; gh = gy2 - gy1 + 1; gcx = gx1 + gw*0.5; gcy = gy1 + gh*0.5; px1 = crois[baseid*5 + 1]; py1 = crois[baseid*5 + 2]; px2 = crois[baseid*5 + 3]; py2 = crois[baseid*5 + 4]; pw = px2 - px1 + 1; ph = py2 - py1 + 1; pcx = px1 + (pw-1)*0.5; pcy = py1 + (ph-1)*0.5; bbox_targets[4*baseid] = 10 * (gcx - pcx) / (pw + 1e-7); bbox_targets[4*baseid+1] = 10 * (gcy - pcy) / (ph + 1e-7); 
bbox_targets[4*baseid+2] = 5 * log(gw/(pw + 1e-7)); bbox_targets[4*baseid+3] = 5 * log(gh/(ph + 1e-7)); } delete [] max_overlap_ids; delete [] overlaps; delete [] max_overlaps; } } int mask_ct = 0; // TODO: change hard code for (int i = 0; i < num_images*1000; i++) { if (labels[i] > 0 and mask_ct < num_images * param_.max_masks) { out_pos_boxes[5*mask_ct] = crois[5*i]; out_pos_boxes[5*mask_ct+1] = crois[5*i+1]; out_pos_boxes[5*mask_ct+2] = crois[5*i+2]; out_pos_boxes[5*mask_ct+3] = crois[5*i+3]; out_pos_boxes[5*mask_ct+4] = crois[5*i+4]; out_pos_ids[mask_ct] = maxids[i]; mask_ct++; } } for (int i = mask_ct; i < num_images*param_.max_masks; i++) { out_pos_boxes[5*i] = i % num_images; out_pos_boxes[5*i+1] = i % 200; out_pos_boxes[5*i+2] = i % 200; out_pos_boxes[5*i+3] = i % 200 + 100; out_pos_boxes[5*i+4] = i % 200 + 100; out_pos_ids[i] = -1; } delete [] maxids; Stream<gpu> *so = ctx.get_stream<gpu>(); Tensor<gpu, 2> olabels = out_data[proposal::kLabels].get<gpu, 2, real_t>(so); Tensor<gpu, 2> obbox_targets = out_data[proposal::kBboxTarget].get<gpu, 2, real_t>(so); Tensor<gpu, 2> obbox_weights = out_data[proposal::kBboxWeight].get<gpu, 2, real_t>(so); Tensor<gpu, 2> omaskrois = out_data[proposal::kMaskRoIs].get<gpu, 2, real_t>(s); Tensor<gpu, 2> omaskids = out_data[proposal::kMaskIds].get<gpu, 2, real_t>(s); cudaMemcpy(omaskrois.dptr_, out_pos_boxes, sizeof(float) * num_images * param_.max_masks * 5, cudaMemcpyHostToDevice); cudaMemcpy(omaskids.dptr_, out_pos_ids, sizeof(float) * num_images * param_.max_masks, cudaMemcpyHostToDevice); // TODO: change hard code cudaMemcpy(olabels.dptr_, labels, sizeof(float) * num_images*1000, cudaMemcpyHostToDevice); cudaMemcpy(obbox_targets.dptr_, bbox_targets, 4*sizeof(float) * num_images*1000, cudaMemcpyHostToDevice); cudaMemcpy(obbox_weights.dptr_, bbox_weights, 4*sizeof(float) * num_images*1000, cudaMemcpyHostToDevice); } virtual void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &aux_states) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(in_grad.size(), 5); Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s); Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s); Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s); Tensor<xpu, 3> ggt_boxes = in_grad[proposal::kGTBoxes].get<xpu, 3, real_t>(s); Tensor<xpu, 2> gvalid_ranges = in_grad[proposal::kValidRanges].get<xpu, 2, real_t>(s); // can not assume the grad would be zero Assign(gscores, req[proposal::kClsProb], 0); Assign(gbbox, req[proposal::kBBoxPred], 0); Assign(ginfo, req[proposal::kImInfo], 0); Assign(ggt_boxes, req[proposal::kGTBoxes], 0); Assign(gvalid_ranges, req[proposal::kValidRanges], 0); } private: MultiProposalTargetMaskSatelliteParam param_; }; // class MultiProposalOp template<> Operator *CreateOp<gpu>(MultiProposalTargetMaskSatelliteParam param) { return new MultiProposalTargetMaskSatelliteGPUOp<gpu>(param); } } // namespace op } // namespace mxnet
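Aside: the proposal-target file above both decodes RPN deltas into absolute boxes (getPropsSatelliteCu) and re-encodes matched ground-truth boxes as regression targets with the 10x/5x scaling seen in bbox_targets. Below is a minimal host-side sketch of that encode/decode round trip; encode_delta, decode_delta, and the sample boxes are illustrative names, not part of the source, and the decode here simply inverts the scaled encoding (the forward kernel itself consumes the head's raw predictions without the 10x/5x factors).

// Illustrative host-side round trip for the delta parameterization used above.
#include <cmath>
#include <cstdio>

struct Box { float x1, y1, x2, y2; };

// Mirrors the bbox_targets formulas: 10*(gcx-pcx)/pw, 10*(gcy-pcy)/ph,
// 5*log(gw/pw), 5*log(gh/ph).
void encode_delta(const Box& p, const Box& g, float d[4]) {
    float pw = p.x2 - p.x1 + 1.0f, ph = p.y2 - p.y1 + 1.0f;
    float gw = g.x2 - g.x1 + 1.0f, gh = g.y2 - g.y1 + 1.0f;
    float pcx = p.x1 + 0.5f * (pw - 1.0f), pcy = p.y1 + 0.5f * (ph - 1.0f);
    float gcx = g.x1 + 0.5f * (gw - 1.0f), gcy = g.y1 + 0.5f * (gh - 1.0f);
    d[0] = 10.0f * (gcx - pcx) / pw;
    d[1] = 10.0f * (gcy - pcy) / ph;
    d[2] = 5.0f * std::log(gw / pw);
    d[3] = 5.0f * std::log(gh / ph);
}

// Inverse of encode_delta; same center/size arithmetic as getPropsSatelliteCu.
Box decode_delta(const Box& p, const float d[4]) {
    float pw = p.x2 - p.x1 + 1.0f, ph = p.y2 - p.y1 + 1.0f;
    float pcx = p.x1 + 0.5f * (pw - 1.0f), pcy = p.y1 + 0.5f * (ph - 1.0f);
    float cx = (d[0] / 10.0f) * pw + pcx, cy = (d[1] / 10.0f) * ph + pcy;
    float w = std::exp(d[2] / 5.0f) * pw, h = std::exp(d[3] / 5.0f) * ph;
    Box out = { cx - 0.5f * (w - 1.0f), cy - 0.5f * (h - 1.0f),
                cx + 0.5f * (w - 1.0f), cy + 0.5f * (h - 1.0f) };
    return out;
}

int main() {
    Box proposal = {10, 10, 50, 40}, gt = {12, 8, 56, 44};
    float d[4];
    encode_delta(proposal, gt, d);
    Box back = decode_delta(proposal, d);  // recovers gt up to float rounding
    std::printf("%.2f %.2f %.2f %.2f\n", back.x1, back.y1, back.x2, back.y2);
    return 0;
}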
3e44c7a51056beca0ddbb5df84c768161b8abffe.hip
// !!! This is a file automatically generated by hipify!!! #include "stdafx.h" #define MAXITER 6000 #define CHECKITER 500 #define LUMPTOL 1e-5 #define TOLERANCE 1e-3 #define SPURTOLERANCE 1e-10 #define MAXEIGNUM 10 #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include "getopt.h" #include <time.h> #include <math.h> #include "rocblas.h" #include <assert.h> #include "cutil.h" #include <windows.h> #include "csr.h" #include "unistd.h" #include "fcntl.h" #include <vector> #include <iostream> #include <time.h> #include<fstream> #include "lanczos.h" #include "stencilMVM.h" #include "intervening.h" using namespace std; int main() { int width, height; int N; int fp; fp = open("E:\\ct-contour-reconstruction\\Release\\matrix.dat", O_RDONLY); read(fp, &width, sizeof(int)); read(fp, &height, sizeof(int)); N = width*height; vector<int> rowptr(N+1); vector<int> cols; vector<float> vals; vector<float> temp(81*sizeof(float)*N); read(fp, &temp[0], 81*sizeof(float)*N); close(fp); printf("\n:%d and %d",width,height); int xoffsets[81]; int yoffsets[81]; int p = 0; for(int y=-5;y<=5;y++) { for(int x=-5;x<=5;x++) { if(x*x+y*y <= 25) { xoffsets[p] = x; yoffsets[p] = y; p++; } } } rowptr[0] = 0; int elt=0; for(int y=0;y<height;y++) { for(int x=0;x<width;x++) { int id = y*width+x; for(int o=0;o<81;o++) { if(y+yoffsets[o] >= 0 && y+yoffsets[o]<height && x+xoffsets[o]>=0 && x+xoffsets[o]<width) { cols.push_back( id + yoffsets[o]*width+xoffsets[o]); vals.push_back(temp[ o*width*height + id]); elt++; } } rowptr[id+1] = elt; } } /* Actual code starts here */ int nMatrixDimension = width * height; int radius = 5; CSRMatrix mat(N, elt, &rowptr[0], &cols[0], &vals[0]); dim3 blockDim(XBLOCK, 1); dim3 gridDim((width * height - 1)/XBLOCK + 1, 1); int matrixPitchInFloats = findPitchInFloats(nMatrixDimension); Stencil myStencil(radius, width, height, matrixPitchInFloats); float* devMatrix; printf("Reading matrix from file...\n"); float* hostMatrix = &vals[0]; printf("Copying matrix to GPU\n"); uint nDimension = myStencil.getStencilArea(); hipMalloc((void**)&devMatrix, nDimension * nMatrixDimension * sizeof(float)); CUDA_SAFE_CALL(hipMemcpy(devMatrix, hostMatrix, nMatrixDimension * nDimension * sizeof(float), hipMemcpyHostToDevice)); //intervene(myStencil, devMatrix, &devDstMatrix); float* eigenValues; float* devEigenVectors = 0; float fTolerance = 0.001; generalizedEigensolve(myStencil, devMatrix, matrixPitchInFloats, MAXEIGNUM, &eigenValues, &devEigenVectors, fTolerance); return 1; }
3e44c7a51056beca0ddbb5df84c768161b8abffe.cu
#include "stdafx.h" #define MAXITER 6000 #define CHECKITER 500 #define LUMPTOL 1e-5 #define TOLERANCE 1e-3 #define SPURTOLERANCE 1e-10 #define MAXEIGNUM 10 #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include "getopt.h" #include <time.h> #include <math.h> #include "cublas.h" #include <assert.h> #include "cutil.h" #include <windows.h> #include "csr.h" #include "unistd.h" #include "fcntl.h" #include <vector> #include <iostream> #include <time.h> #include<fstream> #include "lanczos.h" #include "stencilMVM.h" #include "intervening.h" using namespace std; int main() { int width, height; int N; int fp; fp = open("E:\\ct-contour-reconstruction\\Release\\matrix.dat", O_RDONLY); read(fp, &width, sizeof(int)); read(fp, &height, sizeof(int)); N = width*height; vector<int> rowptr(N+1); vector<int> cols; vector<float> vals; vector<float> temp(81*sizeof(float)*N); read(fp, &temp[0], 81*sizeof(float)*N); close(fp); printf("\n¾ØÕóάÊýΪ:%d and %d£¡",width,height); int xoffsets[81]; int yoffsets[81]; int p = 0; for(int y=-5;y<=5;y++) { for(int x=-5;x<=5;x++) { if(x*x+y*y <= 25) { xoffsets[p] = x; yoffsets[p] = y; p++; } } } rowptr[0] = 0; int elt=0; for(int y=0;y<height;y++) { for(int x=0;x<width;x++) { int id = y*width+x; for(int o=0;o<81;o++) { if(y+yoffsets[o] >= 0 && y+yoffsets[o]<height && x+xoffsets[o]>=0 && x+xoffsets[o]<width) { cols.push_back( id + yoffsets[o]*width+xoffsets[o]); vals.push_back(temp[ o*width*height + id]); elt++; } } rowptr[id+1] = elt; } } /* Actual code starts here */ int nMatrixDimension = width * height; int radius = 5; CSRMatrix mat(N, elt, &rowptr[0], &cols[0], &vals[0]); dim3 blockDim(XBLOCK, 1); dim3 gridDim((width * height - 1)/XBLOCK + 1, 1); int matrixPitchInFloats = findPitchInFloats(nMatrixDimension); Stencil myStencil(radius, width, height, matrixPitchInFloats); float* devMatrix; printf("Reading matrix from file...\n"); float* hostMatrix = &vals[0]; printf("Copying matrix to GPU\n"); uint nDimension = myStencil.getStencilArea(); cudaMalloc((void**)&devMatrix, nDimension * nMatrixDimension * sizeof(float)); CUDA_SAFE_CALL(cudaMemcpy(devMatrix, hostMatrix, nMatrixDimension * nDimension * sizeof(float), cudaMemcpyHostToDevice)); //intervene(myStencil, devMatrix, &devDstMatrix); float* eigenValues; float* devEigenVectors = 0; float fTolerance = 0.001; generalizedEigensolve(myStencil, devMatrix, matrixPitchInFloats, MAXEIGNUM, &eigenValues, &devEigenVectors, fTolerance); return 1; }
23a510fbe3a2518bffb7e1ab472988db0cd0dc12.hip
// !!! This is a file automatically generated by hipify!!! //Author: Adriel Kim //6-27-2020 //Updated 7-5-2020 //Updated 7-7-2020 //Timing with CUDA events to measure PCIe data throughput /* Desc: Basic 2D matrix operations - element-wise addition, subtraction, multiplication, and division. To do: - Use vector instead of array? - Error handling for cuda events using a wrapper function - Be able to test for varying sizes of images. (For now we manually define with constant N) - Add timer to compare CPU and GPU implementations - Double check if all memory is freed - Optimize by eliminating redundant calculations - Test code on department servers */ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <sys/time.h> #include <iostream> #include <fstream> #include <cstring> #include <string> #include <stdlib.h> #include <stdio.h> //define imin(a,b) (a<b?a:b)//example of ternary operator in c++ //4176,2048 #define R 4176 #define C 2048 #define N (R*C)//# of elements in matrices const int threadsPerBlock = 1024;//threads in a block. A chunk that shares the same shared memory. const int blocksPerGrid = 8352;//imin(32, (N + threadsPerBlock - 1) / threadsPerBlock);//this will be our output array size for sumKernel. using namespace std; hipError_t matrixOperation(double* c, const double* a, const double* b, unsigned int arrSize, int operation, float* kernel_runtime, float* GPU_transfer_time, float* cuda_htod_elapsed_time, float* cuda_kernel_elapsed_time, float* cuda_dtoh_elapsed_time,float* cuda_total_time); void CPUMatrixOperation(double* c, const double* a, const double* b, unsigned int arrSize, int operation); long long start_timer(); long long stop_timer(long long start_time, const char *name); //any advantages with mapping directly to strucutre of matrix? We're just representing 2D matrix using 1D array... //it would be difficult to do the above since we want the operations to occur over abitrarily large matrices //this can definitely be optimzied by elminating redundant calculations __global__ void matrixAddKernel(double *c, const double *a, const double *b) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { c[tid] = a[tid] + b[tid]; //adds total number of running threads to tid, the current index. tid += blockDim.x * gridDim.x; } } __global__ void matrixSubtractKernel(double* c, const double* a, const double* b) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { c[tid] = a[tid] - b[tid]; //adds total number of running threads to tid, the current index. 
tid += blockDim.x * gridDim.x; } } __global__ void matrixMultiplyKernel(double* c, const double* a, const double* b) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { c[tid] = a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } } __global__ void matrixDivideKernel(double* c, const double* a, const double* b) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { c[tid] = (a[tid]/b[tid]); tid += blockDim.x * gridDim.x; } } //--------------------------------------------------------------------------------- void CPUMatrixAdd(double* c, const double* a, const double* b, unsigned int arrSize){ for(int i = 0; i < arrSize; i++){ c[i] = a[i] + b[i]; } } void CPUMatrixSubtract(double* c, const double* a, const double* b, unsigned int arrSize){ for(int i = 0; i < arrSize; i++){ c[i] = a[i] - b[i]; } } void CPUMatrixMultiply(double* c, const double* a, const double* b, unsigned int arrSize){ for(int i = 0; i < arrSize; i++){ c[i] = a[i] * b[i]; } } void CPUMatrixDivide(double* c, const double* a, const double* b, unsigned int arrSize){ for(int i = 0; i < arrSize; i++){ c[i] = a[i] / b[i]; } } //---------------------------------------------------------------------------------- void printMatrix(double* arr) { for (int i = 0;i < R; i++) { for (int k = 0;k < C; k++) { cout << (arr[k + i * R])<<" "; } cout << endl; } } void getFileSize(string fileName){ ofstream binFile; ifstream file; string line; binFile.open("tempBinFile.bin");//I know it's super hacky and gross. file.open("./FitTextFiles/"+fileName); if(file.is_open()){ while(getline(file, line)){ binFile << line; } file.close(); } streampos begin, end; ifstream tempFile ("tempBinFile.bin", ios::binary); begin = tempFile.tellg(); tempFile.seekg(0, ios::end); end = tempFile.tellg(); tempFile.close(); cout<<"size is: "<< (end-begin)<<" bytes.\n"<<endl; remove("tempBinFile.bin"); } void fillWithFitImage(string fileName, double* arr){ string line; ifstream fitsFile; fitsFile.open("./FitTextFiles/"+fileName); int ind = 0; if(fitsFile.is_open()){ getline(fitsFile, line);//gets rid of initial part, which is just shape } if(fitsFile.is_open()){ while(getline(fitsFile, line) && ind < N){ char cstr[line.size()+1]; strcpy(cstr, line.c_str()); double num = atof(cstr); //add num to arr arr[ind] = num; ind++; } fitsFile.close(); } else{ cout<<"Error opening file"<<endl; } } void fillWithRandomNumbers(double* arr, int arrSize){ for(int i = 0; i<arrSize;i++){ arr[i] = rand() % 100+1; } } void writeImageToText(double *img, string name, int arrSize){ ofstream file; file.open(name); for(int i = 0; i<arrSize;i++){ file << img[i] << "\n"; } file.close(); } void writeResultToText(string fileName, double result){ ofstream file; file.open(fileName, ios_base::app | ios_base::out); file << result << "\n"; file.close(); } bool checkEquality(double* arr1, double* arr2, int arrSize){ for(int i = 0;i < arrSize;i++){ if (arr1[i]!=arr2[i]){ return false; } } return true; } double getArraySize(int arrSize){ return arrSize*sizeof(double); } int main() { //CUDA Timing variables float* cuda_htod = (float*)malloc(sizeof(float)); float* cuda_kernel_time = (float*)malloc(sizeof(float)); float* cuda_dtoh = (float*)malloc(sizeof(float)); float* cuda_total_time = (float*)malloc(sizeof(float)); //CPU timing variables float* GPU_kernel_time = (float*)malloc(sizeof(float)); float* GPU_transfer_time = (float*)malloc(sizeof(float)); //Must allocate host memory first before calling kernel. 
double* outputs = (double*)malloc(N * sizeof(double)); double* doubleMatrix = (double*)malloc(N * sizeof(double)); double* doubleMatrix2 = (double*)malloc(N * sizeof(double)); double* CPUoutputs = (double*)malloc(N * sizeof(double)); int operation = 0; cout << "Enter which operation (1 = add, 2 = subtract, 3 = multiply, 4 = divide)" << endl; cin >> operation; //populated 2D array with data cout<<"Populating image data"<<endl; //fillWithFitImage("imgraw1.txt", doubleMatrix); //fillWithFitImage("img1.txt", doubleMatrix2); fillWithRandomNumbers(doubleMatrix, N); fillWithRandomNumbers(doubleMatrix2, N); double arr1Size = getArraySize(N); double arr2Size = getArraySize(N); double outArrSize = getArraySize(N); cout<<"Size of raw image (bytes): " << arr1Size<<endl; cout<<"Size of bias image (bytes): "<<arr2Size<<endl; //getFileSize("imgraw1.txt"); //getFileSize("img1.txt"); cout<<"GPU Start!\n"<<endl; hipError_t cudaStatus = matrixOperation(outputs, doubleMatrix, doubleMatrix2, N, operation,GPU_kernel_time, GPU_transfer_time, cuda_htod, cuda_kernel_time, cuda_dtoh, cuda_total_time); cout << "CPU Start!\n" << endl; CPUMatrixOperation(CPUoutputs, doubleMatrix, doubleMatrix2, N, operation); cout << "CPU DONE!" << endl; //printMatrix(CPUoutputs); bool equal = checkEquality(outputs, CPUoutputs, N); if(equal == true) cout<<"CPU and GPU outputs are equal"<<endl; else cout<<"CPU and GPU outputs are NOT equal"<<endl; //writeImageToText(outputs,"gpuFit.txt", N); free(outputs); free(doubleMatrix); free(doubleMatrix2); free(CPUoutputs); free(GPU_kernel_time); free(cuda_dtoh); free(cuda_htod); free(cuda_kernel_time); free(cuda_total_time); return 0; } hipError_t matrixOperation(double* c, const double* a, const double* b, unsigned int arrSize, int operation, float* kernel_runtime, float* GPU_transfer_time, float* cuda_htod_elapsed_time, float* cuda_kernel_elapsed_time, float* cuda_dtoh_elapsed_time, float* cuda_total_time) { double* dev_a = 0; double* dev_b = 0; double* dev_c = 0; float kernel_time = 0; float transfer_time = 0; hipError_t cudaStatus; cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } //Allocate GPU buffers for three vectors (two input, one output) cudaStatus = hipMalloc((void**)&dev_c, N * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed! 1"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, N * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed! 2"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, N * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed! 3"); goto Error; } //Copy input vectors from host memory to GPU buffers cudaStatus = hipMemcpyAsync(dev_a, a, sizeof(double) * N, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed! 1"); goto Error; } cudaStatus = hipMemcpyAsync(dev_b, b, sizeof(double) * N, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed! 
2"); goto Error; } switch (operation) { case 1: matrixAddKernel << <blocksPerGrid, threadsPerBlock >> > (dev_c, dev_a, dev_b); break; case 2: matrixSubtractKernel << <blocksPerGrid, threadsPerBlock >> > (dev_c, dev_a, dev_b); break; case 3: matrixMultiplyKernel << <blocksPerGrid, threadsPerBlock >> > (dev_c, dev_a, dev_b); break; case 4: matrixDivideKernel << <blocksPerGrid, threadsPerBlock >> > (dev_c, dev_a, dev_b); break; } //copies result to host so we can use it. cudaStatus = hipMemcpy(c, dev_c, sizeof(double) * N, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed! 3"); goto Error; } Error: cout<<"Cuda memory freed"<<endl; hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; } /* c is output, a and b are input arrays to perform operation, arrSize is size of array, operation is operation type */ void CPUMatrixOperation(double* c, const double* a, const double* b, unsigned int arrSize, int operation){ switch (operation) { case 1: CPUMatrixAdd(c, a, b, arrSize); break; case 2: CPUMatrixSubtract(c, a, b, arrSize); break; case 3: CPUMatrixMultiply(c, a, b, arrSize); break; case 4: CPUMatrixDivide(c, a, b, arrSize); break; } } // Returns the current time in microseconds long long start_timer() { struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec * 1000000 + tv.tv_usec; } // converts a long long ns value to float seconds float usToSec(long long time) { return ((float)time)/(1000000); } // Prints the time elapsed since the specified time long long stop_timer(long long start_time, const char *name) { struct timeval tv; gettimeofday(&tv, NULL); long long end_time = tv.tv_sec * 1000000 + tv.tv_usec; float elapsed = usToSec(end_time - start_time); printf("%s: %.5f sec\n", name, elapsed); return end_time - start_time; }
23a510fbe3a2518bffb7e1ab472988db0cd0dc12.cu
//Author: Adriel Kim //6-27-2020 //Updated 7-5-2020 //Updated 7-7-2020 //Timing with CUDA events to measure PCIe data throughput /* Desc: Basic 2D matrix operations - element-wise addition, subtraction, multiplication, and division. To do: - Use vector instead of array? - Error handling for cuda events using a wrapper function - Be able to test for varying sizes of images. (For now we manually define with constant N) - Add timer to compare CPU and GPU implementations - Double check if all memory is freed - Optimize by eliminating redundant calculations - Test code on department servers */ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <sys/time.h> #include <iostream> #include <fstream> #include <cstring> #include <string> #include <stdlib.h> #include <stdio.h> //define imin(a,b) (a<b?a:b)//example of ternary operator in c++ //4176,2048 #define R 4176 #define C 2048 #define N (R*C)//# of elements in matrices const int threadsPerBlock = 1024;//threads in a block. A chunk that shares the same shared memory. const int blocksPerGrid = 8352;//imin(32, (N + threadsPerBlock - 1) / threadsPerBlock);//this will be our output array size for sumKernel. using namespace std; cudaError_t matrixOperation(double* c, const double* a, const double* b, unsigned int arrSize, int operation, float* kernel_runtime, float* GPU_transfer_time, float* cuda_htod_elapsed_time, float* cuda_kernel_elapsed_time, float* cuda_dtoh_elapsed_time,float* cuda_total_time); void CPUMatrixOperation(double* c, const double* a, const double* b, unsigned int arrSize, int operation); long long start_timer(); long long stop_timer(long long start_time, const char *name); //any advantages with mapping directly to strucutre of matrix? We're just representing 2D matrix using 1D array... //it would be difficult to do the above since we want the operations to occur over abitrarily large matrices //this can definitely be optimzied by elminating redundant calculations __global__ void matrixAddKernel(double *c, const double *a, const double *b) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { c[tid] = a[tid] + b[tid]; //adds total number of running threads to tid, the current index. tid += blockDim.x * gridDim.x; } } __global__ void matrixSubtractKernel(double* c, const double* a, const double* b) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { c[tid] = a[tid] - b[tid]; //adds total number of running threads to tid, the current index. 
tid += blockDim.x * gridDim.x; } } __global__ void matrixMultiplyKernel(double* c, const double* a, const double* b) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { c[tid] = a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } } __global__ void matrixDivideKernel(double* c, const double* a, const double* b) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < N) { c[tid] = (a[tid]/b[tid]); tid += blockDim.x * gridDim.x; } } //--------------------------------------------------------------------------------- void CPUMatrixAdd(double* c, const double* a, const double* b, unsigned int arrSize){ for(int i = 0; i < arrSize; i++){ c[i] = a[i] + b[i]; } } void CPUMatrixSubtract(double* c, const double* a, const double* b, unsigned int arrSize){ for(int i = 0; i < arrSize; i++){ c[i] = a[i] - b[i]; } } void CPUMatrixMultiply(double* c, const double* a, const double* b, unsigned int arrSize){ for(int i = 0; i < arrSize; i++){ c[i] = a[i] * b[i]; } } void CPUMatrixDivide(double* c, const double* a, const double* b, unsigned int arrSize){ for(int i = 0; i < arrSize; i++){ c[i] = a[i] / b[i]; } } //---------------------------------------------------------------------------------- void printMatrix(double* arr) { for (int i = 0;i < R; i++) { for (int k = 0;k < C; k++) { cout << (arr[k + i * R])<<" "; } cout << endl; } } void getFileSize(string fileName){ ofstream binFile; ifstream file; string line; binFile.open("tempBinFile.bin");//I know it's super hacky and gross. file.open("./FitTextFiles/"+fileName); if(file.is_open()){ while(getline(file, line)){ binFile << line; } file.close(); } streampos begin, end; ifstream tempFile ("tempBinFile.bin", ios::binary); begin = tempFile.tellg(); tempFile.seekg(0, ios::end); end = tempFile.tellg(); tempFile.close(); cout<<"size is: "<< (end-begin)<<" bytes.\n"<<endl; remove("tempBinFile.bin"); } void fillWithFitImage(string fileName, double* arr){ string line; ifstream fitsFile; fitsFile.open("./FitTextFiles/"+fileName); int ind = 0; if(fitsFile.is_open()){ getline(fitsFile, line);//gets rid of initial part, which is just shape } if(fitsFile.is_open()){ while(getline(fitsFile, line) && ind < N){ char cstr[line.size()+1]; strcpy(cstr, line.c_str()); double num = atof(cstr); //add num to arr arr[ind] = num; ind++; } fitsFile.close(); } else{ cout<<"Error opening file"<<endl; } } void fillWithRandomNumbers(double* arr, int arrSize){ for(int i = 0; i<arrSize;i++){ arr[i] = rand() % 100+1; } } void writeImageToText(double *img, string name, int arrSize){ ofstream file; file.open(name); for(int i = 0; i<arrSize;i++){ file << img[i] << "\n"; } file.close(); } void writeResultToText(string fileName, double result){ ofstream file; file.open(fileName, ios_base::app | ios_base::out); file << result << "\n"; file.close(); } bool checkEquality(double* arr1, double* arr2, int arrSize){ for(int i = 0;i < arrSize;i++){ if (arr1[i]!=arr2[i]){ return false; } } return true; } double getArraySize(int arrSize){ return arrSize*sizeof(double); } int main() { //CUDA Timing variables float* cuda_htod = (float*)malloc(sizeof(float)); float* cuda_kernel_time = (float*)malloc(sizeof(float)); float* cuda_dtoh = (float*)malloc(sizeof(float)); float* cuda_total_time = (float*)malloc(sizeof(float)); //CPU timing variables float* GPU_kernel_time = (float*)malloc(sizeof(float)); float* GPU_transfer_time = (float*)malloc(sizeof(float)); //Must allocate host memory first before calling kernel. 
double* outputs = (double*)malloc(N * sizeof(double)); double* doubleMatrix = (double*)malloc(N * sizeof(double)); double* doubleMatrix2 = (double*)malloc(N * sizeof(double)); double* CPUoutputs = (double*)malloc(N * sizeof(double)); int operation = 0; cout << "Enter which operation (1 = add, 2 = subtract, 3 = multiply, 4 = divide)" << endl; cin >> operation; //populated 2D array with data cout<<"Populating image data"<<endl; //fillWithFitImage("imgraw1.txt", doubleMatrix); //fillWithFitImage("img1.txt", doubleMatrix2); fillWithRandomNumbers(doubleMatrix, N); fillWithRandomNumbers(doubleMatrix2, N); double arr1Size = getArraySize(N); double arr2Size = getArraySize(N); double outArrSize = getArraySize(N); cout<<"Size of raw image (bytes): " << arr1Size<<endl; cout<<"Size of bias image (bytes): "<<arr2Size<<endl; //getFileSize("imgraw1.txt"); //getFileSize("img1.txt"); cout<<"GPU Start!\n"<<endl; cudaError_t cudaStatus = matrixOperation(outputs, doubleMatrix, doubleMatrix2, N, operation,GPU_kernel_time, GPU_transfer_time, cuda_htod, cuda_kernel_time, cuda_dtoh, cuda_total_time); cout << "CPU Start!\n" << endl; CPUMatrixOperation(CPUoutputs, doubleMatrix, doubleMatrix2, N, operation); cout << "CPU DONE!" << endl; //printMatrix(CPUoutputs); bool equal = checkEquality(outputs, CPUoutputs, N); if(equal == true) cout<<"CPU and GPU outputs are equal"<<endl; else cout<<"CPU and GPU outputs are NOT equal"<<endl; //writeImageToText(outputs,"gpuFit.txt", N); free(outputs); free(doubleMatrix); free(doubleMatrix2); free(CPUoutputs); free(GPU_kernel_time); free(cuda_dtoh); free(cuda_htod); free(cuda_kernel_time); free(cuda_total_time); return 0; } cudaError_t matrixOperation(double* c, const double* a, const double* b, unsigned int arrSize, int operation, float* kernel_runtime, float* GPU_transfer_time, float* cuda_htod_elapsed_time, float* cuda_kernel_elapsed_time, float* cuda_dtoh_elapsed_time, float* cuda_total_time) { double* dev_a = 0; double* dev_b = 0; double* dev_c = 0; float kernel_time = 0; float transfer_time = 0; cudaError_t cudaStatus; cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } //Allocate GPU buffers for three vectors (two input, one output) cudaStatus = cudaMalloc((void**)&dev_c, N * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed! 1"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, N * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed! 2"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, N * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed! 3"); goto Error; } //Copy input vectors from host memory to GPU buffers cudaStatus = cudaMemcpyAsync(dev_a, a, sizeof(double) * N, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed! 1"); goto Error; } cudaStatus = cudaMemcpyAsync(dev_b, b, sizeof(double) * N, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed! 
2"); goto Error; } switch (operation) { case 1: matrixAddKernel << <blocksPerGrid, threadsPerBlock >> > (dev_c, dev_a, dev_b); break; case 2: matrixSubtractKernel << <blocksPerGrid, threadsPerBlock >> > (dev_c, dev_a, dev_b); break; case 3: matrixMultiplyKernel << <blocksPerGrid, threadsPerBlock >> > (dev_c, dev_a, dev_b); break; case 4: matrixDivideKernel << <blocksPerGrid, threadsPerBlock >> > (dev_c, dev_a, dev_b); break; } //copies result to host so we can use it. cudaStatus = cudaMemcpy(c, dev_c, sizeof(double) * N, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed! 3"); goto Error; } Error: cout<<"Cuda memory freed"<<endl; cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; } /* c is output, a and b are input arrays to perform operation, arrSize is size of array, operation is operation type */ void CPUMatrixOperation(double* c, const double* a, const double* b, unsigned int arrSize, int operation){ switch (operation) { case 1: CPUMatrixAdd(c, a, b, arrSize); break; case 2: CPUMatrixSubtract(c, a, b, arrSize); break; case 3: CPUMatrixMultiply(c, a, b, arrSize); break; case 4: CPUMatrixDivide(c, a, b, arrSize); break; } } // Returns the current time in microseconds long long start_timer() { struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec * 1000000 + tv.tv_usec; } // converts a long long ns value to float seconds float usToSec(long long time) { return ((float)time)/(1000000); } // Prints the time elapsed since the specified time long long stop_timer(long long start_time, const char *name) { struct timeval tv; gettimeofday(&tv, NULL); long long end_time = tv.tv_sec * 1000000 + tv.tv_usec; float elapsed = usToSec(end_time - start_time); printf("%s: %.5f sec\n", name, elapsed); return end_time - start_time; }
4c12641dba78da18cdfa13ad402d8a5d8d54c833.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include <memory> #include "dau_conv/base_dau_conv_layer.hpp" #include "dau_conv/dau_conv_impl/dau_conv_forward.hpp" #include "dau_conv/dau_conv_impl/dau_conv_backward.hpp" #include "dau_conv/util/math_functions.hpp" #include "dau_conv/util/convolve.hpp" namespace DAUConvNet { template <typename Dtype> void BaseDAUConvLayer<Dtype>::Forward_gpu(const Dtype* bottom_data, const vector<int> bottom_shape, Dtype* top_data, const vector<int> top_shape) { // - first perform gaussian bluring based on variance that is fixed over the whole layer (use CuDNN for that) // - then perform forward pass with our custom kernel // - optionally add bias M_Assert(this->is_data_on_gpu() == true, "Forward_gpu requires data on GPU, but is_data_on_gpu() returned false !"); clock_t start_t = clock(); // check if we need to do merging of components; // make sure we check based on steps done in backpropagation and we should avoid merging if only forward is called (by default current_iteration_index=0 so start at second iter bool do_merginig_optmization = this->unit_merge_iteration_step > 0 && (this->current_iteration_index + 1) % this->unit_merge_iteration_step == 0 ? true : false; // if during training then merge components if needed if (do_merginig_optmization) { //merge_components(); } // before we get params we need to ensure params are within valid bounds { // we still need to ensure our values are within valid bounds // clip sigma, mu1 and mu2 to within bounds caffe_gpu_clip_lower(this->conv_in_channels_*this->units_per_channel*this->conv_out_channels_, this->unit_sigma_lower_bound, this->param_sigma(), this->param_sigma()); Dtype mu1_lower_limit = this->offsets_already_centered_ == false ? (Dtype)unit_border_bound : (-1* (int)(this->kernel_w_/2) + unit_border_bound); Dtype mu2_lower_limit = this->offsets_already_centered_ == false ? (Dtype)unit_border_bound : (-1* (int)(this->kernel_h_/2) + unit_border_bound); Dtype mu1_upper_limit = this->offsets_already_centered_ == false ? this->kernel_w_-1 - (Dtype)unit_border_bound : ((int)(this->kernel_w_/2) - unit_border_bound); Dtype mu2_upper_limit = this->offsets_already_centered_ == false ? 
this->kernel_h_-1 - (Dtype)unit_border_bound : ((int)(this->kernel_h_/2) - unit_border_bound); caffe_gpu_clip_lower(this->conv_in_channels_*this->units_per_channel*this->conv_out_channels_, mu1_lower_limit, this->param_mu1(), this->param_mu1()); caffe_gpu_clip_lower(this->conv_in_channels_*this->units_per_channel*this->conv_out_channels_, mu2_lower_limit, this->param_mu2(), this->param_mu2()); caffe_gpu_clip_upper(this->conv_in_channels_*this->units_per_channel*this->conv_out_channels_, mu1_upper_limit, this->param_mu1(), this->param_mu1()); caffe_gpu_clip_upper(this->conv_in_channels_*this->units_per_channel*this->conv_out_channels_, mu2_upper_limit, this->param_mu2(), this->param_mu2()); } const int height_out = top_shape[this->channel_axis_ + 1]; const int width_out = top_shape[this->channel_axis_ + 2]; // get filter for gaussian blur step const Dtype* gauss_kernel = this->get_gaussian_kernel(stream_[0]); // get buffers for all parameters that we learn const Dtype* filter_weights = this->param_w(); const Dtype* filter_offsets_float_mu1 = this->param_mu1(); const Dtype* filter_offsets_float_mu2 = this->param_mu2(); hipEvent_t memset_top, memset_filter; CUDA_CHECK(hipEventCreate(&memset_top)); CUDA_CHECK(hipEventCreate(&memset_filter)); { // intermediate data for blurred input Dtype* interm_data = this->temp_interm_buffer(); // convolve with kernel { caffe_gpu_set_async<Dtype>(this->conv_out_channels_* this->batch_num_* this->height_out_* this->width_out_, (Dtype)0, top_data, paralel_streams[0]); caffe_gpu_set_async<Dtype>(buffer_fwd_.filtered_images_sizes_ / sizeof(float), (Dtype)0, buffer_fwd_.filtered_images, paralel_streams[1]); CUDA_CHECK(hipEventRecord(memset_top, paralel_streams[0])); CUDA_CHECK(hipEventRecord(memset_filter, paralel_streams[1])); conv2_data_desc sig_desc(1, this->conv_in_channels_* this->batch_num_, this->height_, this->width_, this->conv_in_channels_* this->batch_num_*this->height_*this->width_, this->height_*this->width_, this->width_, 1); conv2_data_desc filt_desc(1,1,this->aggregation.kernel_h_,this->aggregation.kernel_w_, this->aggregation.kernel_h_ * this->aggregation.kernel_w_, this->aggregation.kernel_h_ * this->aggregation.kernel_w_, this->aggregation.kernel_w_, 1); conv2_data_desc out_desc = sig_desc; caffe_gpu_convolve2(interm_data, out_desc, bottom_data, sig_desc, gauss_kernel, filt_desc, stream_[0]); CUDA_CHECK(hipStreamWaitEvent(stream_[0], memset_top, 0)); CUDA_CHECK(hipStreamWaitEvent(stream_[0], memset_filter, 0)); } this->forward_obj->forward_pass(interm_data, filter_offsets_float_mu1, filter_offsets_float_mu2, filter_weights, DAUConvForward<Dtype>::SGF, this->kernel_w_, this->kernel_h_, this->offsets_already_centered_, top_data, buffer_fwd_.filtered_images, NULL, NULL, buffer_fwd_.filter_offsets_and_weights, stream_[0]); // add bias if needed if (this->bias_term_) { const Dtype* bias_data = this->param_bias(); this->forward_gpu_bias(top_data, bias_data); } } CUDA_CHECK(hipEventDestroy(memset_top)); CUDA_CHECK(hipEventDestroy(memset_filter)); } template <typename Dtype> void BaseDAUConvLayer<Dtype>::Backward_gpu(const Dtype* top_data, const Dtype* top_error, const vector<int>& top_shape, bool propagate_down, const Dtype* bottom_data, Dtype* bottom_error, const vector<int>& bottom_shape, const vector<bool>& params_propagate_down ) { // - first convolve bottom input data with kernels for individual parameters (w, mu1, mu2, sigma) // - then compute and collect gradients by shifting convolved bottom input data and multiplying it with the top error 
data // - finally back-propagade the error by convolving top error with the rotated filters (we can use the same function as for forward-pass, but need to transpose mu1 and mu2 values) M_Assert(this->is_data_on_gpu() == true, "Backward_gpu requires data on GPU, but is_data_on_gpu() returned false !"); this->current_iteration_index++; //return; // get buffers for all parameters that we learn const Dtype* filter_weights = this->param_w(); const Dtype* filter_offsets_float_mu1 = this->param_mu1(); const Dtype* filter_offsets_float_mu2 = this->param_mu2(); Dtype* param_weights_diff = this->param_w_grad(); Dtype* param_mu1_diff = this->param_mu1_grad(); Dtype* param_mu2_diff = this->param_mu2_grad(); Dtype* param_sigma_diff = this->param_sigma_grad(); Dtype* bias_diff = this->param_bias_grad(); Dtype* bwd_gradients_data = this->temp_bwd_gradients(); // get filters for back-propagation const Dtype* deriv_error_kernel = this->get_deriv_kernel_error(stream_[0]); // get filters for param gradients const Dtype* deriv_kernels_data = this->get_deriv_kernel_params(stream_[0]); // intermediate data for blurred input Dtype* interm_data = this->temp_interm_buffer(); // transform all four accumulated gradients into seperabe buffers of size [S x G x F] int param_size = this->units_per_channel * this->conv_in_channels_ * this->conv_out_channels_; // make sure gradient aggregation buffer is zeroed caffe_gpu_memset(param_size * NUM_K * sizeof(Dtype), 0, bwd_gradients_data); hipEvent_t memset_top, memset_filter, memset_error; CUDA_CHECK(hipEventCreate(&memset_top)); CUDA_CHECK(hipEventCreate(&memset_filter)); CUDA_CHECK(hipEventCreate(&memset_error)); { // Gradient w.r.t. bias. if (this->bias_term_ && params_propagate_down[4]) { this->backward_gpu_bias(bias_diff, top_error); } // Gradient w.r.t w,mu1,mu2 and sigma if (params_propagate_down[0]) { // TODO: if it is faster we should add zeroing to input prepare functions !! 
// convolve with kernel { caffe_gpu_set_async(this->buffer_bwd_.filtered_images_sizes_/sizeof(Dtype), (Dtype)0, this->buffer_bwd_.filtered_images, paralel_streams[0]); caffe_gpu_set_async(this->buffer_bwd_.error_image_sizes_/sizeof(Dtype), (Dtype)0, this->buffer_bwd_.error_images, paralel_streams[1]); CUDA_CHECK(hipEventRecord(memset_filter, paralel_streams[0])); CUDA_CHECK(hipEventRecord(memset_error, paralel_streams[1])); conv2_data_desc sig_desc(this->conv_in_channels_* this->batch_num_, 1, this->height_, this->width_, this->height_*this->width_, this->height_*this->width_, this->width_, 1); conv2_data_desc filt_desc(1,this->NUM_K,this->aggregation.kernel_h_,this->aggregation.kernel_w_, this->NUM_K * this->aggregation.kernel_h_ * this->aggregation.kernel_w_, this->aggregation.kernel_h_ * this->aggregation.kernel_w_, this->aggregation.kernel_w_, 1); conv2_data_desc out_desc(this->conv_in_channels_* this->batch_num_, this->NUM_K, this->height_, this->width_, this->height_*this->width_ * this->NUM_K, this->height_*this->width_, this->width_, 1); caffe_gpu_convolve2(interm_data, out_desc, bottom_data, sig_desc, deriv_kernels_data, filt_desc, stream_[0]); CUDA_CHECK(hipStreamWaitEvent(stream_[0], memset_filter, 0)); CUDA_CHECK(hipStreamWaitEvent(stream_[0], memset_error, 0)); } // collect gradients by shifting convolved bottom input data and multiplying it with the top error data // WARNING: if this->kernel_w_ or this->kernel_h_ changes then memory will not be allocated properly backward_grad_obj->backward_pass(interm_data, top_error, filter_offsets_float_mu1, filter_offsets_float_mu2, filter_weights, this->kernel_w_, this->kernel_h_, this->offsets_already_centered_, bwd_gradients_data, this->buffer_bwd_.filtered_images, this->buffer_bwd_.error_images, this->buffer_bwd_.filter_weights, this->buffer_bwd_.filter_offsets, //this->ignore_edge_gradients_, stream_[0]); this->ignore_edge_gradients_, 0); } // finally perform back-propagation of the error values if (propagate_down) { Dtype const* top_error_for_bwd = top_error; // if size top_error (input) is smaller then interm_data (output) (i.e. expected input should be the same size as output) // then we need to copy top_error to bigger buffer i.e. 
with padded zeros if (buffer_bwd_.resized_top_for_bwd_sizes_ > 0) { // set zeros caffe_gpu_set_async<Dtype>(buffer_bwd_.resized_top_for_bwd_sizes_ / sizeof(float), (Dtype)0, buffer_bwd_.resized_top_for_bwd, stream_[0]); // then copy but with appropriate padding caffe_gpu_pad2d(this->batch_num_ * this->conv_out_channels_, this->height_out_, this->width_out_, this->width_/2 - this->width_out_/2, top_error, buffer_bwd_.resized_top_for_bwd, stream_[0]); top_error_for_bwd = buffer_bwd_.resized_top_for_bwd; } // convolve with kernels { // NOTE: memory buffer is shared with gradient compute so make sure not to zero it before backward_grad_obj->backward_pass is done caffe_gpu_set_async<Dtype>(this->conv_in_channels_* this->batch_num_* this->height_* this->width_, (Dtype)0, bottom_error, paralel_streams[0]); caffe_gpu_set_async<Dtype>(buffer_fwd_.filtered_images_sizes_ / sizeof(float), (Dtype)0, buffer_fwd_.filtered_images, paralel_streams[1]); CUDA_CHECK(hipEventRecord(memset_top, paralel_streams[0])); CUDA_CHECK(hipEventRecord(memset_filter, paralel_streams[1])); int max_width = ::max(this->width_out_,this->width_); int max_height = ::max(this->height_out_,this->height_); conv2_data_desc sig_desc(1, this->conv_out_channels_* this->batch_num_, max_height, max_width, this->conv_out_channels_* this->batch_num_*max_height*max_width, max_height*max_width, max_width, 1); conv2_data_desc filt_desc(1,1,this->aggregation.kernel_h_,this->aggregation.kernel_w_, this->aggregation.kernel_h_ * this->aggregation.kernel_w_, this->aggregation.kernel_h_ * this->aggregation.kernel_w_, this->aggregation.kernel_w_, 1); conv2_data_desc out_desc = sig_desc; caffe_gpu_convolve2(interm_data, out_desc, top_error_for_bwd, sig_desc, deriv_error_kernel, filt_desc, stream_[0]); CUDA_CHECK(hipStreamWaitEvent(stream_[0], memset_top, 0)); CUDA_CHECK(hipStreamWaitEvent(stream_[0], memset_filter, 0)); } // then use our custom kernel for forwarding, however we need to transpose kernels, which in our case means // that we need to rotate mu1,mu2 locations // get param buffer for mu1 and mu2 that will be rotated Dtype *param_mu1_backprop = this->temp_param_buffer() + 0 * param_size; Dtype *param_mu2_backprop = this->temp_param_buffer() + 1 * param_size; // rot(mu) = (kernel_w-1) - mu { caffe_gpu_memcpy_async(param_size * sizeof(float), filter_offsets_float_mu1, param_mu1_backprop, 0); caffe_gpu_memcpy_async(param_size * sizeof(float), filter_offsets_float_mu2, param_mu2_backprop, 0); caffe_gpu_scal(param_size, (Dtype)-1, param_mu1_backprop, cublas_handle); caffe_gpu_scal(param_size, (Dtype)-1, param_mu2_backprop, cublas_handle); // if params are already centered then nothing else needed if (this->offsets_already_centered_ == false) { caffe_gpu_add_scalar(param_size, (Dtype) (this->kernel_w_ - 1), param_mu1_backprop); caffe_gpu_add_scalar(param_size, (Dtype) (this->kernel_h_ - 1), param_mu2_backprop); } } // now we take the blured error data and perform sum over shifted input data with our custom kernel i.e. 
forward pass this->backward_backporp_obj->forward_pass(interm_data, param_mu1_backprop, param_mu2_backprop, filter_weights, DAUConvForward<Dtype>::FGS, this->kernel_w_, this->kernel_h_, this->offsets_already_centered_, bottom_error, buffer_fwd_.filtered_images, NULL, NULL, buffer_fwd_.filter_offsets_and_weights, stream_[0]); } } // we need to accumulate gradients to the final buffer and add weights to some derivates if (params_propagate_down[0] || params_propagate_down[1] || params_propagate_down[2] || params_propagate_down[3]) { // multiply gradients with appropriate weights /// add add weight multiplyer as specifed by derivative formula only for mu1,mu2 and sigma if (NUM_K > 1 && params_propagate_down[1]) caffe_gpu_mul(param_size, bwd_gradients_data + 1 * param_size, filter_weights, bwd_gradients_data + 1 * param_size); // mu1 if (NUM_K > 2 && params_propagate_down[2]) caffe_gpu_mul(param_size, bwd_gradients_data + 2 * param_size, filter_weights, bwd_gradients_data + 2 * param_size); // mu2 if (NUM_K > 3 && params_propagate_down[3]) caffe_gpu_mul(param_size, bwd_gradients_data + 3 * param_size, filter_weights, bwd_gradients_data + 3 * param_size); // sigma // for weight gradient we only accumulate to final buffer if (NUM_K > 0 && params_propagate_down[0]) caffe_gpu_axpy(param_size, (Dtype)1, bwd_gradients_data + 0 * param_size, param_weights_diff, cublas_handle); // w if (NUM_K > 1 && params_propagate_down[1]) caffe_gpu_axpy(param_size, (Dtype)1, bwd_gradients_data + 1 * param_size, param_mu1_diff, cublas_handle); // mu1 if (NUM_K > 2 && params_propagate_down[2]) caffe_gpu_axpy(param_size, (Dtype)1, bwd_gradients_data + 2 * param_size, param_mu2_diff, cublas_handle); // mu2 if (NUM_K > 3 && params_propagate_down[3]) caffe_gpu_axpy(param_size, (Dtype)1, bwd_gradients_data + 3 * param_size, param_sigma_diff, cublas_handle); // sigma // if we need to ignore last few gauss then make sure we do not update their parameters if (this->num_units_ignore > 0) { this->set_last_n_gauss_to_zero(param_weights_diff, this->num_units_ignore); this->set_last_n_gauss_to_zero(param_mu1_diff, this->num_units_ignore); this->set_last_n_gauss_to_zero(param_mu2_diff, this->num_units_ignore); this->set_last_n_gauss_to_zero(param_sigma_diff, this->num_units_ignore); } } CUDA_CHECK(hipEventDestroy(memset_top)); CUDA_CHECK(hipEventDestroy(memset_filter)); CUDA_CHECK(hipEventDestroy(memset_error)); } template <typename Dtype> __global__ void set_last_n_gauss_to_zero_kernel(const int S, const int G, const int F, Dtype* x, int num_gauss_zero) { CUDA_KERNEL_LOOP(index, S*G*F) { int f = (index % F) ; int sg = index / F; int g = (sg % G); int s = sg / G; if (g >= G - num_gauss_zero) x[index] = 0; } } template <typename Dtype> void BaseDAUConvLayer<Dtype>::set_last_n_gauss_to_zero(Dtype* array, int num_gauss_zero){ hipLaunchKernelGGL(( set_last_n_gauss_to_zero_kernel<Dtype>), dim3(CUDA_GET_BLOCKS(this->conv_in_channels_ * this->units_per_channel * this->conv_out_channels_)), dim3(CUDA_NUM_THREADS), 0, 0, this->conv_in_channels_, this->units_per_channel, this->conv_out_channels_, array, num_gauss_zero); } // TODO: we could speed-up with vectorized read/write // pre-compute sigma inverse values needed in Gaussian distribution (1/sigma^2, 1/sigma^3 and 1/2*1/sigma^2) template <typename Dtype> __global__ void conv_gauss_precompute_sigma_kernel(const int n, Dtype* buf_sigma, Dtype* buf_sigma_square_inv, Dtype* buf_sigma_cube_inv, Dtype* buf_sigma_square_inv_half, const int sigma_lower_bound) { CUDA_KERNEL_LOOP(index, n) { Dtype 
sigma_value = buf_sigma[index]; Dtype sigma2 = sigma_value * sigma_value; Dtype sigma2_inv = 1/sigma2; buf_sigma[index] = sigma_value; buf_sigma_square_inv[index] = sigma2_inv; buf_sigma_cube_inv[index] = 1/(sigma2 * sigma_value); buf_sigma_square_inv_half[index] = (0.5 * sigma2_inv) ; } } template <typename Dtype> __global__ void conv_gauss_distributions_kernel(const int N, const int k_w, int k_h, bool offsets_already_centered, const Dtype* W, const Dtype* MU1, const Dtype* MU2, const Dtype* SIGMA_2_INV, const Dtype* SIGMA_3_INV, const Dtype* SIGMA_2_INV_HALF, Dtype* guass_dist, Dtype* guass_deriv_mu1, Dtype* guass_deriv_mu2, Dtype* guass_deriv_sigma) { const int filter_size = k_w * k_h; for (int n = blockIdx.z * blockDim.z + threadIdx.z; n < N; n += blockDim.z * gridDim.z){ // read w, mu1, mu2, sigma and other data needed to compute gaussian Distributions //const Dtype w = W[n]; const Dtype mu1 = MU1[n] + (offsets_already_centered ? (int)(k_w/2) : 0); const Dtype mu2 = MU2[n] + (offsets_already_centered ? (int)(k_h/2) : 0); const Dtype sigma_square_inv = SIGMA_2_INV[n]; const Dtype sigma_square_inv_half = SIGMA_2_INV_HALF[n]; const Dtype sigma_cube_inv = SIGMA_3_INV[n]; // blockDim by x and y should always be 1 since whole filter will always fit into one block, so just retrive filter x,y indeces and calculate gaussians const int x = threadIdx.x; const int y = threadIdx.y; const Dtype dist_x = x - mu1; const Dtype dist_x_2 = dist_x*dist_x; const Dtype dist_y = y - mu2; const Dtype dist_y_2 = dist_y*dist_y; const Dtype dist = dist_x_2 + dist_y_2; const Dtype gauss_value = exp( -dist * sigma_square_inv_half); const int ptr_offset = n * filter_size + y * k_w + x; guass_dist[ptr_offset] = gauss_value; guass_deriv_mu1[ptr_offset] = (dist_x * sigma_square_inv) * gauss_value; guass_deriv_mu2[ptr_offset] = (dist_y * sigma_square_inv) * gauss_value; guass_deriv_sigma[ptr_offset] = (dist * sigma_cube_inv) * gauss_value; } } template <typename Dtype> __global__ void scal_kernel_batched(const int n, const Dtype* a, const Dtype* x, Dtype* y, const int m) { for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < m; j += blockDim.y * gridDim.y) { Dtype a_value = a[j]; for (int i = j * n + blockIdx.x * blockDim.x + threadIdx.x; i < n* (1 + j) ; i += blockDim.x * gridDim.x) { y[i] = a_value * x[i]; } } } template <typename Dtype> __global__ void axpby_kernel_batched(const int n, const Dtype a_factor, const Dtype* a, const Dtype* x, const Dtype* b, Dtype* y, const int m) { for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < m; j += blockDim.y * gridDim.y) { Dtype a_value = a[j] * a_factor; Dtype b_value = b[j]; for (int i = j * n + blockIdx.x * blockDim.x + threadIdx.x; i < n * (1 + j); i += blockDim.x * gridDim.x) { y[i] = a_value * x[i] + b_value * y[i]; } } } template <typename Dtype> __global__ void add_sorted_kernel(const int S, const int G, const int F, const int n, const Dtype* unsorted_input, Dtype* sorted_output) { for (int f = blockIdx.z * blockDim.z + threadIdx.z; f < F; f += blockDim.z * gridDim.z) { for (int s = blockIdx.y * blockDim.y + threadIdx.y; s < S; s += blockDim.y * gridDim.y) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { Dtype sum_g = 0; for (int g = 0; g < G; ++g) { sum_g += unsorted_input[ ((s*G + g)*F + f )*n + i]; } sorted_output[(f*S + s)*n + i] = sum_g; } } } } template <typename Dtype> __global__ void inv_kernel(const int n, const Dtype* x, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = 1 / x[index]; } } template <typename 
Dtype> __global__ void mirror_kernel(const int S, const int F, const int n, const Dtype* x, Dtype* y) { for (int f = blockIdx.z * blockDim.z + threadIdx.z; f < F; f += blockDim.z * gridDim.z) { for (int s = blockIdx.y * blockDim.y + threadIdx.y; s < S; s += blockDim.y * gridDim.y) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { // perform kernel mirroring by setting y[i] = x[n-i -1] // at the same time switch S and F indexes y[(s*F + f) * n + i] = x[(f*S + s) * n + n - i -1]; } } } } template <typename Dtype> void BaseDAUKernelCompute<Dtype>::get_kernels(BaseDAUKernelParams<Dtype>& input, BaseDAUKernelOutput<Dtype>& output, hipblasHandle_t cublas_handle) { // we get mutable ptr but we do not modify it, this is just poor code in part of the CUB code int* tmp_precomp_index_gpu = this->precomp_index(); clock_t start_t = clock(); Dtype* weight = output.weight(); const Dtype* gauss_params_w = input.weight(); Dtype* gauss_params_mu1 = input.mu1(); Dtype* gauss_params_mu2 = input.mu2(); Dtype* gauss_params_sigma = input.sigma(); Dtype* gauss_params_sigma_square_inv = this->param_temp(SIGMA_SQUARE_INV); Dtype* gauss_params_sigma_cube_inv = this->param_temp(SIGMA_CUBE_INV); Dtype* gauss_params_sigma_square_inv_half = this->param_temp(SIGMA_SQUARE_INV_HALF); const int S = this->num_in_channels; const int F = this->num_out_channels; const int G = this->num_gauss; const int K_w = this->kernel_w; const int K_h = this->kernel_h; // clip sigma, mu1 and mu2 to within bounds caffe_gpu_clip_lower(S*F*G, this->sigma_lower_bound, gauss_params_sigma, gauss_params_sigma); Dtype mu1_lower_limit = this->offsets_already_centered == false ? (Dtype)component_border_bound : (-1* (int)(kernel_w/2) + component_border_bound); Dtype mu2_lower_limit = this->offsets_already_centered == false ? (Dtype)component_border_bound : (-1* (int)(kernel_h/2) + component_border_bound); Dtype mu1_upper_limit = this->offsets_already_centered == false ? kernel_w-1 - (Dtype)component_border_bound : ((int)(kernel_w/2) - component_border_bound); Dtype mu2_upper_limit = this->offsets_already_centered == false ? kernel_h-1 - (Dtype)component_border_bound : ((int)(kernel_h/2) - component_border_bound); caffe_gpu_clip_lower(S*F*G, mu1_lower_limit, gauss_params_mu1, gauss_params_mu1); caffe_gpu_clip_lower(S*F*G, mu2_lower_limit, gauss_params_mu2, gauss_params_mu2); caffe_gpu_clip_upper(S*F*G, mu1_upper_limit, gauss_params_mu1, gauss_params_mu1); caffe_gpu_clip_upper(S*F*G, mu2_upper_limit, gauss_params_mu2, gauss_params_mu2); // 0. precompute sigma^2, sigma^3 and (sigma^2)/2 hipLaunchKernelGGL(( conv_gauss_precompute_sigma_kernel<Dtype>), dim3(CUDA_GET_BLOCKS(S*G*F)), dim3(CUDA_NUM_THREADS), 0, 0, S*G*F, gauss_params_sigma, gauss_params_sigma_square_inv, gauss_params_sigma_cube_inv, gauss_params_sigma_square_inv_half, this->sigma_lower_bound); // 1. for each pixel in [SxGxF] x [K_w x K_h] compute G (Gauss distribution), dG/dx, dG/dy, dG/dsigma // cuda dimension X runs over K_w, Y over K_h and dimension Z over all filters // we translate cuda thread X,Y dimensions directly to filter indexces of size K_w, K_h and assign cuda thread Z dimension with // several filters to fill as many CAFFE_CUDA_NUM_THREADS threads available (i.e. 
multiple filter can be processed in one cuda block) dim3 threadsPerBlock(K_w, K_h, CUDA_NUM_THREADS/(K_w * K_h)); dim3 numBlocks(1, 1, (S*G*F + threadsPerBlock.z - 1) / threadsPerBlock.z); Dtype* gauss_dist = this->kernels_temp(GAUSS_DIST); size_t d_param_size = S * G* F* K_h * K_w; Dtype* deriv_weight = output.d_params() + 0 * d_param_size; Dtype* deriv_mu1 = output.d_params() + 1 * d_param_size; Dtype* deriv_mu2 = output.d_params() + 2 * d_param_size; Dtype* deriv_sigma = output.d_params() + 3 * d_param_size; hipLaunchKernelGGL(( conv_gauss_distributions_kernel<Dtype>), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, S*G*F, K_w, K_h, this->offsets_already_centered, gauss_params_w, gauss_params_mu1, gauss_params_mu2, gauss_params_sigma_square_inv, gauss_params_sigma_cube_inv, gauss_params_sigma_square_inv_half, gauss_dist, deriv_mu1, deriv_mu2, deriv_sigma); // 2. for each filter (G, dG/dx, dG/dy, dG/dsigma) calculate sums (use different sums if using normalization by square sum) Dtype* guass_norm = this->param_temp(GAUSS_NORM); Dtype* deriv_mu1_sums = this->param_temp(DERIV_MU1_SUMS); Dtype* deriv_mu2_sums = this->param_temp(DERIV_MU2_SUMS); Dtype* deriv_sigma_sums = this->param_temp(DERIV_SIGMA_SUMS); // TODO: all three sums can be done in parallel, do we need seperate streams to make this run in parallel ? if (this->use_unit_normalization == false) { // if there is no normalization then there should be no derivative of normalization caffe_gpu_set((S*F*G), (Dtype)0, deriv_mu1_sums); caffe_gpu_set((S*F*G), (Dtype)0, deriv_mu2_sums); caffe_gpu_set((S*F*G), (Dtype)0, deriv_sigma_sums); } else if (this->use_square_unit_normalization) { // when using square gauss normalization derivatives dG/dx, dG/dy, dG/dsigma need to be multiplied by un-weighted, un-normalized gaussian dstirubution i.e. 
gauss_dist Dtype* deriv_mu1_times_gauss_dist = this->kernels_temp(DERIV_MU1_TIMES_GAUSS_DIST); Dtype* deriv_mu2_times_gauss_dist = this->kernels_temp(DERIV_MU2_TIMES_GAUSS_DIST); Dtype* deriv_sigma_times_gauss_dist = this->kernels_temp(DERIV_SIGMA_TIMES_GAUSS_DIST); caffe_gpu_mul((S*F*G) * (K_w*K_h), gauss_dist, deriv_mu1, deriv_mu1_times_gauss_dist); // deriv_mu1_times_gauss_dist = gauss_dist * deriv_mu1; caffe_gpu_mul((S*F*G) * (K_w*K_h), gauss_dist, deriv_mu2, deriv_mu2_times_gauss_dist); // deriv_mu2_times_gauss_dist = gauss_dist * deriv_mu2; caffe_gpu_mul((S*F*G) * (K_w*K_h), gauss_dist, deriv_sigma, deriv_sigma_times_gauss_dist); // deriv_sigma_times_gauss_dist = gauss_dist * deriv_sigma; caffe_gpu_sum((S*F*G) * (K_w*K_h), deriv_mu1_times_gauss_dist, deriv_mu1_sums, S*F*G, tmp_precomp_index_gpu); caffe_gpu_sum((S*F*G) * (K_w*K_h), deriv_mu2_times_gauss_dist, deriv_mu2_sums, S*F*G, tmp_precomp_index_gpu); caffe_gpu_sum((S*F*G) * (K_w*K_h), deriv_sigma_times_gauss_dist, deriv_sigma_sums, S*F*G, tmp_precomp_index_gpu); caffe_gpu_scal((S*F*G), (Dtype)2, deriv_mu1_sums, cublas_handle); caffe_gpu_scal((S*F*G), (Dtype)2, deriv_mu2_sums, cublas_handle); caffe_gpu_scal((S*F*G), (Dtype)2, deriv_sigma_sums, cublas_handle); } else { caffe_gpu_sum((S*F*G) * (K_w*K_h), deriv_mu1, deriv_mu1_sums, S*F*G, tmp_precomp_index_gpu); caffe_gpu_sum((S*F*G) * (K_w*K_h), deriv_mu2, deriv_mu2_sums, S*F*G, tmp_precomp_index_gpu); caffe_gpu_sum((S*F*G) * (K_w*K_h), deriv_sigma, deriv_sigma_sums, S*F*G, tmp_precomp_index_gpu); } if (this->use_unit_normalization == false) { // set guass_norm to 1 if we sould NOT normalize to sum of 1 caffe_gpu_set((S*F*G), (Dtype)1, guass_norm); } else if (this->use_square_unit_normalization) { // we need to normalize to sum of squares to 1 Dtype* gauss_dist_square = this->kernels_temp(GAUSS_DIST_SQUARE); caffe_gpu_mul((S*F*G) * (K_w*K_h), gauss_dist, gauss_dist, gauss_dist_square); // gauss_dist_square = gauss_dist * gauss_dist; caffe_gpu_sum((S*F*G) * (K_w*K_h), gauss_dist_square, guass_norm, S*F*G, tmp_precomp_index_gpu); } else { // we need to normalize to sum of 1 caffe_gpu_sum((S*F*G) * (K_w*K_h), gauss_dist, guass_norm, S*F*G, tmp_precomp_index_gpu); } // invert guass_norm i.e. guass_norm = 1/guass_norm hipLaunchKernelGGL(( inv_kernel<Dtype>), dim3(CUDA_GET_BLOCKS(S*G*F)), dim3(CUDA_NUM_THREADS), 0, 0, S*G*F, guass_norm, guass_norm); // gauss_mu1_sum = abs(gauss_mu1_sum) > 1e-10 ? gauss_mu1_sum : 0; caffe_gpu_clip_eps(S*F*G, (Dtype)1e-10, deriv_mu1_sums, deriv_mu1_sums); caffe_gpu_clip_eps(S*F*G, (Dtype)1e-10, deriv_mu2_sums, deriv_mu2_sums); // 3. for each filter G and derivative filters dG/dx, dG/dy, dG/dsigma apply its normalization terms threadsPerBlock = dim3(K_w* K_h, CUDA_NUM_THREADS/(K_w * K_h)); numBlocks = dim3(1, (S*F*G + threadsPerBlock.y - 1) / threadsPerBlock.y); // deriv_weight = gauss_dist * guass_norm hipLaunchKernelGGL(( scal_kernel_batched<Dtype>), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, K_w * K_h, guass_norm, gauss_dist, deriv_weight, S*F*G); // !after! 
weight and deriv_weight are computed we can add weight to guass_norm which will be used in remaining derivateives and main kernel caffe_gpu_mul(S*F*G, gauss_params_w, guass_norm, guass_norm); // guass_norm = gauss_params_w / guass_norm; // apply gauss normalization factors directly to sums to avoid additional call to scal_kernel_batched caffe_gpu_mul(S*F*G, guass_norm, deriv_mu1_sums, deriv_mu1_sums); // deriv_mu1_sums = deriv_mu1_sums * guass_norm; caffe_gpu_mul(S*F*G, guass_norm, deriv_mu2_sums, deriv_mu2_sums); // deriv_mu2_sums = deriv_mu2_sums * guass_norm; caffe_gpu_mul(S*F*G, guass_norm, deriv_sigma_sums, deriv_sigma_sums); // deriv_sigma_sums = deriv_sigma_sums * guass_norm; // create normalized derivative filters hipLaunchKernelGGL(( axpby_kernel_batched<Dtype>), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, K_w * K_h, (Dtype)-1, deriv_mu1_sums, deriv_weight, guass_norm, deriv_mu1, S*F*G); hipLaunchKernelGGL(( axpby_kernel_batched<Dtype>), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, K_w * K_h, (Dtype)-1, deriv_mu2_sums, deriv_weight, guass_norm, deriv_mu2, S*F*G); hipLaunchKernelGGL(( axpby_kernel_batched<Dtype>), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, K_w * K_h, (Dtype)-1, deriv_sigma_sums, deriv_weight, guass_norm, deriv_sigma, S*F*G); // 4. calculate main kernel weights by applying gauss norm and weights, and suming over SxGxF kernels into FxS kernels (in correct order i.e. rearagning them at the same time) // gauss_dist = w/norm * gauss_dist (note, guass_norm should be w/norm) hipLaunchKernelGGL(( scal_kernel_batched<Dtype>), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, K_w * K_h, guass_norm, gauss_dist, gauss_dist, S*F*G); threadsPerBlock = dim3(K_w*K_h, sqrt(CUDA_NUM_THREADS/(K_w * K_h) ), sqrt(CUDA_NUM_THREADS/(K_w * K_h) ) ); numBlocks = dim3(1, (S + threadsPerBlock.y - 1) / threadsPerBlock.y, (F + threadsPerBlock.z - 1) / threadsPerBlock.z); hipLaunchKernelGGL(( add_sorted_kernel<Dtype>), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, S, G, F, K_w*K_h, gauss_dist, weight); // 4. calculate seperable filters (WILL NOT IMPLEMENET) // 5. 
create error kernel for back-propagation by reversing the kernel Dtype* deriv_error = output.d_error(); threadsPerBlock = dim3(K_w*K_h, sqrt(CUDA_NUM_THREADS/(K_w * K_h) ), sqrt(CUDA_NUM_THREADS/(K_w * K_h) ) ); numBlocks = dim3(1, (S + threadsPerBlock.y - 1) / threadsPerBlock.y, (F + threadsPerBlock.z - 1) / threadsPerBlock.z); hipLaunchKernelGGL(( mirror_kernel<Dtype>), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, S, F, K_w*K_h, weight, deriv_error); //hipDeviceSynchronize(); clock_t end_t = clock(); } template void BaseDAUKernelCompute<float>::get_kernels(BaseDAUKernelParams<float>& input, BaseDAUKernelOutput<float>& output, hipblasHandle_t cublas_handle); template void BaseDAUKernelCompute<double>::get_kernels(BaseDAUKernelParams<double>& input, BaseDAUKernelOutput<double>& output, hipblasHandle_t cublas_handle); template void BaseDAUConvLayer<double>::set_last_n_gauss_to_zero(double* array, int num_gauss_zero); template void BaseDAUConvLayer<float>::set_last_n_gauss_to_zero(float* array, int num_gauss_zero); template void BaseDAUConvLayer<double>::Forward_gpu(const double* bottom_data, const vector<int> bottom_shape, double* top_data, const vector<int> top_shape); template void BaseDAUConvLayer<float>::Forward_gpu(const float* bottom_data, const vector<int> bottom_shape, float* top_data, const vector<int> top_shape); template void BaseDAUConvLayer<double>::Backward_gpu(const double* top_data, const double* top_error, const vector<int>& top_shape, bool propagate_down, const double* bottom_data, double* bottom_error, const vector<int>& bottom_shape, const vector<bool>& params_propagate_down ); template void BaseDAUConvLayer<float>::Backward_gpu(const float* top_data, const float* top_error, const vector<int>& top_shape, bool propagate_down, const float* bottom_data, float* bottom_error, const vector<int>& bottom_shape, const vector<bool>& params_propagate_down ); } // namespace dau_conv_impl
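// ---------------------------------------------------------------------------
// The HIP listing above and the CUDA listing that follows are the same DAU
// convolution source before and after hipify; the systematic difference is the
// kernel-launch syntax (hipLaunchKernelGGL vs. <<<...>>>). Below is a minimal,
// self-contained sketch of that mapping: scale_kernel is hypothetical and not
// part of either file, and the __HIPCC__ guard follows the convention used
// elsewhere in this dump.
#ifdef __HIPCC__
#include <hip/hip_runtime.h>
#endif

__global__ void scale_kernel(int n, float a, float* x) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launch_scale(int n, float a, float* d_x) {
#ifdef __HIPCC__
    // HIP form as emitted by hipify: explicit grid, block, shared-memory size and stream
    hipLaunchKernelGGL((scale_kernel), dim3((n + 255) / 256), dim3(256), 0, 0, n, a, d_x);
#else
    // CUDA form
    scale_kernel<<<(n + 255) / 256, 256>>>(n, a, d_x);
#endif
}
// ---------------------------------------------------------------------------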
4c12641dba78da18cdfa13ad402d8a5d8d54c833.cu
#include <vector> #include <memory> #include "dau_conv/base_dau_conv_layer.hpp" #include "dau_conv/dau_conv_impl/dau_conv_forward.hpp" #include "dau_conv/dau_conv_impl/dau_conv_backward.hpp" #include "dau_conv/util/math_functions.hpp" #include "dau_conv/util/convolve.hpp" namespace DAUConvNet { template <typename Dtype> void BaseDAUConvLayer<Dtype>::Forward_gpu(const Dtype* bottom_data, const vector<int> bottom_shape, Dtype* top_data, const vector<int> top_shape) { // - first perform gaussian bluring based on variance that is fixed over the whole layer (use CuDNN for that) // - then perform forward pass with our custom kernel // - optionally add bias M_Assert(this->is_data_on_gpu() == true, "Forward_gpu requires data on GPU, but is_data_on_gpu() returned false !"); clock_t start_t = clock(); // check if we need to do merging of components; // make sure we check based on steps done in backpropagation and we should avoid merging if only forward is called (by default current_iteration_index=0 so start at second iter bool do_merginig_optmization = this->unit_merge_iteration_step > 0 && (this->current_iteration_index + 1) % this->unit_merge_iteration_step == 0 ? true : false; // if during training then merge components if needed if (do_merginig_optmization) { //merge_components(); } // before we get params we need to ensure params are within valid bounds { // we still need to ensure our values are within valid bounds // clip sigma, mu1 and mu2 to within bounds caffe_gpu_clip_lower(this->conv_in_channels_*this->units_per_channel*this->conv_out_channels_, this->unit_sigma_lower_bound, this->param_sigma(), this->param_sigma()); Dtype mu1_lower_limit = this->offsets_already_centered_ == false ? (Dtype)unit_border_bound : (-1* (int)(this->kernel_w_/2) + unit_border_bound); Dtype mu2_lower_limit = this->offsets_already_centered_ == false ? (Dtype)unit_border_bound : (-1* (int)(this->kernel_h_/2) + unit_border_bound); Dtype mu1_upper_limit = this->offsets_already_centered_ == false ? this->kernel_w_-1 - (Dtype)unit_border_bound : ((int)(this->kernel_w_/2) - unit_border_bound); Dtype mu2_upper_limit = this->offsets_already_centered_ == false ? 
this->kernel_h_-1 - (Dtype)unit_border_bound : ((int)(this->kernel_h_/2) - unit_border_bound); caffe_gpu_clip_lower(this->conv_in_channels_*this->units_per_channel*this->conv_out_channels_, mu1_lower_limit, this->param_mu1(), this->param_mu1()); caffe_gpu_clip_lower(this->conv_in_channels_*this->units_per_channel*this->conv_out_channels_, mu2_lower_limit, this->param_mu2(), this->param_mu2()); caffe_gpu_clip_upper(this->conv_in_channels_*this->units_per_channel*this->conv_out_channels_, mu1_upper_limit, this->param_mu1(), this->param_mu1()); caffe_gpu_clip_upper(this->conv_in_channels_*this->units_per_channel*this->conv_out_channels_, mu2_upper_limit, this->param_mu2(), this->param_mu2()); } const int height_out = top_shape[this->channel_axis_ + 1]; const int width_out = top_shape[this->channel_axis_ + 2]; // get filter for gaussian blur step const Dtype* gauss_kernel = this->get_gaussian_kernel(stream_[0]); // get buffers for all parameters that we learn const Dtype* filter_weights = this->param_w(); const Dtype* filter_offsets_float_mu1 = this->param_mu1(); const Dtype* filter_offsets_float_mu2 = this->param_mu2(); cudaEvent_t memset_top, memset_filter; CUDA_CHECK(cudaEventCreate(&memset_top)); CUDA_CHECK(cudaEventCreate(&memset_filter)); { // intermediate data for blurred input Dtype* interm_data = this->temp_interm_buffer(); // convolve with kernel { caffe_gpu_set_async<Dtype>(this->conv_out_channels_* this->batch_num_* this->height_out_* this->width_out_, (Dtype)0, top_data, paralel_streams[0]); caffe_gpu_set_async<Dtype>(buffer_fwd_.filtered_images_sizes_ / sizeof(float), (Dtype)0, buffer_fwd_.filtered_images, paralel_streams[1]); CUDA_CHECK(cudaEventRecord(memset_top, paralel_streams[0])); CUDA_CHECK(cudaEventRecord(memset_filter, paralel_streams[1])); conv2_data_desc sig_desc(1, this->conv_in_channels_* this->batch_num_, this->height_, this->width_, this->conv_in_channels_* this->batch_num_*this->height_*this->width_, this->height_*this->width_, this->width_, 1); conv2_data_desc filt_desc(1,1,this->aggregation.kernel_h_,this->aggregation.kernel_w_, this->aggregation.kernel_h_ * this->aggregation.kernel_w_, this->aggregation.kernel_h_ * this->aggregation.kernel_w_, this->aggregation.kernel_w_, 1); conv2_data_desc out_desc = sig_desc; caffe_gpu_convolve2(interm_data, out_desc, bottom_data, sig_desc, gauss_kernel, filt_desc, stream_[0]); CUDA_CHECK(cudaStreamWaitEvent(stream_[0], memset_top, 0)); CUDA_CHECK(cudaStreamWaitEvent(stream_[0], memset_filter, 0)); } this->forward_obj->forward_pass(interm_data, filter_offsets_float_mu1, filter_offsets_float_mu2, filter_weights, DAUConvForward<Dtype>::SGF, this->kernel_w_, this->kernel_h_, this->offsets_already_centered_, top_data, buffer_fwd_.filtered_images, NULL, NULL, buffer_fwd_.filter_offsets_and_weights, stream_[0]); // add bias if needed if (this->bias_term_) { const Dtype* bias_data = this->param_bias(); this->forward_gpu_bias(top_data, bias_data); } } CUDA_CHECK(cudaEventDestroy(memset_top)); CUDA_CHECK(cudaEventDestroy(memset_filter)); } template <typename Dtype> void BaseDAUConvLayer<Dtype>::Backward_gpu(const Dtype* top_data, const Dtype* top_error, const vector<int>& top_shape, bool propagate_down, const Dtype* bottom_data, Dtype* bottom_error, const vector<int>& bottom_shape, const vector<bool>& params_propagate_down ) { // - first convolve bottom input data with kernels for individual parameters (w, mu1, mu2, sigma) // - then compute and collect gradients by shifting convolved bottom input data and multiplying it with the top 
error data // - finally back-propagade the error by convolving top error with the rotated filters (we can use the same function as for forward-pass, but need to transpose mu1 and mu2 values) M_Assert(this->is_data_on_gpu() == true, "Backward_gpu requires data on GPU, but is_data_on_gpu() returned false !"); this->current_iteration_index++; //return; // get buffers for all parameters that we learn const Dtype* filter_weights = this->param_w(); const Dtype* filter_offsets_float_mu1 = this->param_mu1(); const Dtype* filter_offsets_float_mu2 = this->param_mu2(); Dtype* param_weights_diff = this->param_w_grad(); Dtype* param_mu1_diff = this->param_mu1_grad(); Dtype* param_mu2_diff = this->param_mu2_grad(); Dtype* param_sigma_diff = this->param_sigma_grad(); Dtype* bias_diff = this->param_bias_grad(); Dtype* bwd_gradients_data = this->temp_bwd_gradients(); // get filters for back-propagation const Dtype* deriv_error_kernel = this->get_deriv_kernel_error(stream_[0]); // get filters for param gradients const Dtype* deriv_kernels_data = this->get_deriv_kernel_params(stream_[0]); // intermediate data for blurred input Dtype* interm_data = this->temp_interm_buffer(); // transform all four accumulated gradients into seperabe buffers of size [S x G x F] int param_size = this->units_per_channel * this->conv_in_channels_ * this->conv_out_channels_; // make sure gradient aggregation buffer is zeroed caffe_gpu_memset(param_size * NUM_K * sizeof(Dtype), 0, bwd_gradients_data); cudaEvent_t memset_top, memset_filter, memset_error; CUDA_CHECK(cudaEventCreate(&memset_top)); CUDA_CHECK(cudaEventCreate(&memset_filter)); CUDA_CHECK(cudaEventCreate(&memset_error)); { // Gradient w.r.t. bias. if (this->bias_term_ && params_propagate_down[4]) { this->backward_gpu_bias(bias_diff, top_error); } // Gradient w.r.t w,mu1,mu2 and sigma if (params_propagate_down[0]) { // TODO: if it is faster we should add zeroing to input prepare functions !! 
// convolve with kernel { caffe_gpu_set_async(this->buffer_bwd_.filtered_images_sizes_/sizeof(Dtype), (Dtype)0, this->buffer_bwd_.filtered_images, paralel_streams[0]); caffe_gpu_set_async(this->buffer_bwd_.error_image_sizes_/sizeof(Dtype), (Dtype)0, this->buffer_bwd_.error_images, paralel_streams[1]); CUDA_CHECK(cudaEventRecord(memset_filter, paralel_streams[0])); CUDA_CHECK(cudaEventRecord(memset_error, paralel_streams[1])); conv2_data_desc sig_desc(this->conv_in_channels_* this->batch_num_, 1, this->height_, this->width_, this->height_*this->width_, this->height_*this->width_, this->width_, 1); conv2_data_desc filt_desc(1,this->NUM_K,this->aggregation.kernel_h_,this->aggregation.kernel_w_, this->NUM_K * this->aggregation.kernel_h_ * this->aggregation.kernel_w_, this->aggregation.kernel_h_ * this->aggregation.kernel_w_, this->aggregation.kernel_w_, 1); conv2_data_desc out_desc(this->conv_in_channels_* this->batch_num_, this->NUM_K, this->height_, this->width_, this->height_*this->width_ * this->NUM_K, this->height_*this->width_, this->width_, 1); caffe_gpu_convolve2(interm_data, out_desc, bottom_data, sig_desc, deriv_kernels_data, filt_desc, stream_[0]); CUDA_CHECK(cudaStreamWaitEvent(stream_[0], memset_filter, 0)); CUDA_CHECK(cudaStreamWaitEvent(stream_[0], memset_error, 0)); } // collect gradients by shifting convolved bottom input data and multiplying it with the top error data // WARNING: if this->kernel_w_ or this->kernel_h_ changes then memory will not be allocated properly backward_grad_obj->backward_pass(interm_data, top_error, filter_offsets_float_mu1, filter_offsets_float_mu2, filter_weights, this->kernel_w_, this->kernel_h_, this->offsets_already_centered_, bwd_gradients_data, this->buffer_bwd_.filtered_images, this->buffer_bwd_.error_images, this->buffer_bwd_.filter_weights, this->buffer_bwd_.filter_offsets, //this->ignore_edge_gradients_, stream_[0]); this->ignore_edge_gradients_, 0); } // finally perform back-propagation of the error values if (propagate_down) { Dtype const* top_error_for_bwd = top_error; // if size top_error (input) is smaller then interm_data (output) (i.e. expected input should be the same size as output) // then we need to copy top_error to bigger buffer i.e. 
with padded zeros if (buffer_bwd_.resized_top_for_bwd_sizes_ > 0) { // set zeros caffe_gpu_set_async<Dtype>(buffer_bwd_.resized_top_for_bwd_sizes_ / sizeof(float), (Dtype)0, buffer_bwd_.resized_top_for_bwd, stream_[0]); // then copy but with appropriate padding caffe_gpu_pad2d(this->batch_num_ * this->conv_out_channels_, this->height_out_, this->width_out_, this->width_/2 - this->width_out_/2, top_error, buffer_bwd_.resized_top_for_bwd, stream_[0]); top_error_for_bwd = buffer_bwd_.resized_top_for_bwd; } // convolve with kernels { // NOTE: memory buffer is shared with gradient compute so make sure not to zero it before backward_grad_obj->backward_pass is done caffe_gpu_set_async<Dtype>(this->conv_in_channels_* this->batch_num_* this->height_* this->width_, (Dtype)0, bottom_error, paralel_streams[0]); caffe_gpu_set_async<Dtype>(buffer_fwd_.filtered_images_sizes_ / sizeof(float), (Dtype)0, buffer_fwd_.filtered_images, paralel_streams[1]); CUDA_CHECK(cudaEventRecord(memset_top, paralel_streams[0])); CUDA_CHECK(cudaEventRecord(memset_filter, paralel_streams[1])); int max_width = std::max(this->width_out_,this->width_); int max_height = std::max(this->height_out_,this->height_); conv2_data_desc sig_desc(1, this->conv_out_channels_* this->batch_num_, max_height, max_width, this->conv_out_channels_* this->batch_num_*max_height*max_width, max_height*max_width, max_width, 1); conv2_data_desc filt_desc(1,1,this->aggregation.kernel_h_,this->aggregation.kernel_w_, this->aggregation.kernel_h_ * this->aggregation.kernel_w_, this->aggregation.kernel_h_ * this->aggregation.kernel_w_, this->aggregation.kernel_w_, 1); conv2_data_desc out_desc = sig_desc; caffe_gpu_convolve2(interm_data, out_desc, top_error_for_bwd, sig_desc, deriv_error_kernel, filt_desc, stream_[0]); CUDA_CHECK(cudaStreamWaitEvent(stream_[0], memset_top, 0)); CUDA_CHECK(cudaStreamWaitEvent(stream_[0], memset_filter, 0)); } // then use our custom kernel for forwarding, however we need to transpose kernels, which in our case means // that we need to rotate mu1,mu2 locations // get param buffer for mu1 and mu2 that will be rotated Dtype *param_mu1_backprop = this->temp_param_buffer() + 0 * param_size; Dtype *param_mu2_backprop = this->temp_param_buffer() + 1 * param_size; // rot(mu) = (kernel_w-1) - mu { caffe_gpu_memcpy_async(param_size * sizeof(float), filter_offsets_float_mu1, param_mu1_backprop, 0); caffe_gpu_memcpy_async(param_size * sizeof(float), filter_offsets_float_mu2, param_mu2_backprop, 0); caffe_gpu_scal(param_size, (Dtype)-1, param_mu1_backprop, cublas_handle); caffe_gpu_scal(param_size, (Dtype)-1, param_mu2_backprop, cublas_handle); // if params are already centered then nothing else needed if (this->offsets_already_centered_ == false) { caffe_gpu_add_scalar(param_size, (Dtype) (this->kernel_w_ - 1), param_mu1_backprop); caffe_gpu_add_scalar(param_size, (Dtype) (this->kernel_h_ - 1), param_mu2_backprop); } } // now we take the blured error data and perform sum over shifted input data with our custom kernel i.e. 
forward pass this->backward_backporp_obj->forward_pass(interm_data, param_mu1_backprop, param_mu2_backprop, filter_weights, DAUConvForward<Dtype>::FGS, this->kernel_w_, this->kernel_h_, this->offsets_already_centered_, bottom_error, buffer_fwd_.filtered_images, NULL, NULL, buffer_fwd_.filter_offsets_and_weights, stream_[0]); } } // we need to accumulate gradients to the final buffer and add weights to some derivates if (params_propagate_down[0] || params_propagate_down[1] || params_propagate_down[2] || params_propagate_down[3]) { // multiply gradients with appropriate weights /// add add weight multiplyer as specifed by derivative formula only for mu1,mu2 and sigma if (NUM_K > 1 && params_propagate_down[1]) caffe_gpu_mul(param_size, bwd_gradients_data + 1 * param_size, filter_weights, bwd_gradients_data + 1 * param_size); // mu1 if (NUM_K > 2 && params_propagate_down[2]) caffe_gpu_mul(param_size, bwd_gradients_data + 2 * param_size, filter_weights, bwd_gradients_data + 2 * param_size); // mu2 if (NUM_K > 3 && params_propagate_down[3]) caffe_gpu_mul(param_size, bwd_gradients_data + 3 * param_size, filter_weights, bwd_gradients_data + 3 * param_size); // sigma // for weight gradient we only accumulate to final buffer if (NUM_K > 0 && params_propagate_down[0]) caffe_gpu_axpy(param_size, (Dtype)1, bwd_gradients_data + 0 * param_size, param_weights_diff, cublas_handle); // w if (NUM_K > 1 && params_propagate_down[1]) caffe_gpu_axpy(param_size, (Dtype)1, bwd_gradients_data + 1 * param_size, param_mu1_diff, cublas_handle); // mu1 if (NUM_K > 2 && params_propagate_down[2]) caffe_gpu_axpy(param_size, (Dtype)1, bwd_gradients_data + 2 * param_size, param_mu2_diff, cublas_handle); // mu2 if (NUM_K > 3 && params_propagate_down[3]) caffe_gpu_axpy(param_size, (Dtype)1, bwd_gradients_data + 3 * param_size, param_sigma_diff, cublas_handle); // sigma // if we need to ignore last few gauss then make sure we do not update their parameters if (this->num_units_ignore > 0) { this->set_last_n_gauss_to_zero(param_weights_diff, this->num_units_ignore); this->set_last_n_gauss_to_zero(param_mu1_diff, this->num_units_ignore); this->set_last_n_gauss_to_zero(param_mu2_diff, this->num_units_ignore); this->set_last_n_gauss_to_zero(param_sigma_diff, this->num_units_ignore); } } CUDA_CHECK(cudaEventDestroy(memset_top)); CUDA_CHECK(cudaEventDestroy(memset_filter)); CUDA_CHECK(cudaEventDestroy(memset_error)); } template <typename Dtype> __global__ void set_last_n_gauss_to_zero_kernel(const int S, const int G, const int F, Dtype* x, int num_gauss_zero) { CUDA_KERNEL_LOOP(index, S*G*F) { int f = (index % F) ; int sg = index / F; int g = (sg % G); int s = sg / G; if (g >= G - num_gauss_zero) x[index] = 0; } } template <typename Dtype> void BaseDAUConvLayer<Dtype>::set_last_n_gauss_to_zero(Dtype* array, int num_gauss_zero){ set_last_n_gauss_to_zero_kernel<Dtype><<<CUDA_GET_BLOCKS(this->conv_in_channels_ * this->units_per_channel * this->conv_out_channels_), CUDA_NUM_THREADS>>>(this->conv_in_channels_, this->units_per_channel, this->conv_out_channels_, array, num_gauss_zero); } // TODO: we could speed-up with vectorized read/write // pre-compute sigma inverse values needed in Gaussian distribution (1/sigma^2, 1/sigma^3 and 1/2*1/sigma^2) template <typename Dtype> __global__ void conv_gauss_precompute_sigma_kernel(const int n, Dtype* buf_sigma, Dtype* buf_sigma_square_inv, Dtype* buf_sigma_cube_inv, Dtype* buf_sigma_square_inv_half, const int sigma_lower_bound) { CUDA_KERNEL_LOOP(index, n) { Dtype sigma_value = buf_sigma[index]; 
Dtype sigma2 = sigma_value * sigma_value; Dtype sigma2_inv = 1/sigma2; buf_sigma[index] = sigma_value; buf_sigma_square_inv[index] = sigma2_inv; buf_sigma_cube_inv[index] = 1/(sigma2 * sigma_value); buf_sigma_square_inv_half[index] = (0.5 * sigma2_inv) ; } } template <typename Dtype> __global__ void conv_gauss_distributions_kernel(const int N, const int k_w, int k_h, bool offsets_already_centered, const Dtype* W, const Dtype* MU1, const Dtype* MU2, const Dtype* SIGMA_2_INV, const Dtype* SIGMA_3_INV, const Dtype* SIGMA_2_INV_HALF, Dtype* guass_dist, Dtype* guass_deriv_mu1, Dtype* guass_deriv_mu2, Dtype* guass_deriv_sigma) { const int filter_size = k_w * k_h; for (int n = blockIdx.z * blockDim.z + threadIdx.z; n < N; n += blockDim.z * gridDim.z){ // read w, mu1, mu2, sigma and other data needed to compute gaussian Distributions //const Dtype w = W[n]; const Dtype mu1 = MU1[n] + (offsets_already_centered ? (int)(k_w/2) : 0); const Dtype mu2 = MU2[n] + (offsets_already_centered ? (int)(k_h/2) : 0); const Dtype sigma_square_inv = SIGMA_2_INV[n]; const Dtype sigma_square_inv_half = SIGMA_2_INV_HALF[n]; const Dtype sigma_cube_inv = SIGMA_3_INV[n]; // blockDim by x and y should always be 1 since whole filter will always fit into one block, so just retrive filter x,y indeces and calculate gaussians const int x = threadIdx.x; const int y = threadIdx.y; const Dtype dist_x = x - mu1; const Dtype dist_x_2 = dist_x*dist_x; const Dtype dist_y = y - mu2; const Dtype dist_y_2 = dist_y*dist_y; const Dtype dist = dist_x_2 + dist_y_2; const Dtype gauss_value = exp( -dist * sigma_square_inv_half); const int ptr_offset = n * filter_size + y * k_w + x; guass_dist[ptr_offset] = gauss_value; guass_deriv_mu1[ptr_offset] = (dist_x * sigma_square_inv) * gauss_value; guass_deriv_mu2[ptr_offset] = (dist_y * sigma_square_inv) * gauss_value; guass_deriv_sigma[ptr_offset] = (dist * sigma_cube_inv) * gauss_value; } } template <typename Dtype> __global__ void scal_kernel_batched(const int n, const Dtype* a, const Dtype* x, Dtype* y, const int m) { for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < m; j += blockDim.y * gridDim.y) { Dtype a_value = a[j]; for (int i = j * n + blockIdx.x * blockDim.x + threadIdx.x; i < n* (1 + j) ; i += blockDim.x * gridDim.x) { y[i] = a_value * x[i]; } } } template <typename Dtype> __global__ void axpby_kernel_batched(const int n, const Dtype a_factor, const Dtype* a, const Dtype* x, const Dtype* b, Dtype* y, const int m) { for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < m; j += blockDim.y * gridDim.y) { Dtype a_value = a[j] * a_factor; Dtype b_value = b[j]; for (int i = j * n + blockIdx.x * blockDim.x + threadIdx.x; i < n * (1 + j); i += blockDim.x * gridDim.x) { y[i] = a_value * x[i] + b_value * y[i]; } } } template <typename Dtype> __global__ void add_sorted_kernel(const int S, const int G, const int F, const int n, const Dtype* unsorted_input, Dtype* sorted_output) { for (int f = blockIdx.z * blockDim.z + threadIdx.z; f < F; f += blockDim.z * gridDim.z) { for (int s = blockIdx.y * blockDim.y + threadIdx.y; s < S; s += blockDim.y * gridDim.y) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { Dtype sum_g = 0; for (int g = 0; g < G; ++g) { sum_g += unsorted_input[ ((s*G + g)*F + f )*n + i]; } sorted_output[(f*S + s)*n + i] = sum_g; } } } } template <typename Dtype> __global__ void inv_kernel(const int n, const Dtype* x, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = 1 / x[index]; } } template <typename Dtype> __global__ void 
mirror_kernel(const int S, const int F, const int n, const Dtype* x, Dtype* y) { for (int f = blockIdx.z * blockDim.z + threadIdx.z; f < F; f += blockDim.z * gridDim.z) { for (int s = blockIdx.y * blockDim.y + threadIdx.y; s < S; s += blockDim.y * gridDim.y) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { // perform kernel mirroring by setting y[i] = x[n-i -1] // at the same time switch S and F indexes y[(s*F + f) * n + i] = x[(f*S + s) * n + n - i -1]; } } } } template <typename Dtype> void BaseDAUKernelCompute<Dtype>::get_kernels(BaseDAUKernelParams<Dtype>& input, BaseDAUKernelOutput<Dtype>& output, cublasHandle_t cublas_handle) { // we get mutable ptr but we do not modify it, this is just poor code in part of the CUB code int* tmp_precomp_index_gpu = this->precomp_index(); clock_t start_t = clock(); Dtype* weight = output.weight(); const Dtype* gauss_params_w = input.weight(); Dtype* gauss_params_mu1 = input.mu1(); Dtype* gauss_params_mu2 = input.mu2(); Dtype* gauss_params_sigma = input.sigma(); Dtype* gauss_params_sigma_square_inv = this->param_temp(SIGMA_SQUARE_INV); Dtype* gauss_params_sigma_cube_inv = this->param_temp(SIGMA_CUBE_INV); Dtype* gauss_params_sigma_square_inv_half = this->param_temp(SIGMA_SQUARE_INV_HALF); const int S = this->num_in_channels; const int F = this->num_out_channels; const int G = this->num_gauss; const int K_w = this->kernel_w; const int K_h = this->kernel_h; // clip sigma, mu1 and mu2 to within bounds caffe_gpu_clip_lower(S*F*G, this->sigma_lower_bound, gauss_params_sigma, gauss_params_sigma); Dtype mu1_lower_limit = this->offsets_already_centered == false ? (Dtype)component_border_bound : (-1* (int)(kernel_w/2) + component_border_bound); Dtype mu2_lower_limit = this->offsets_already_centered == false ? (Dtype)component_border_bound : (-1* (int)(kernel_h/2) + component_border_bound); Dtype mu1_upper_limit = this->offsets_already_centered == false ? kernel_w-1 - (Dtype)component_border_bound : ((int)(kernel_w/2) - component_border_bound); Dtype mu2_upper_limit = this->offsets_already_centered == false ? kernel_h-1 - (Dtype)component_border_bound : ((int)(kernel_h/2) - component_border_bound); caffe_gpu_clip_lower(S*F*G, mu1_lower_limit, gauss_params_mu1, gauss_params_mu1); caffe_gpu_clip_lower(S*F*G, mu2_lower_limit, gauss_params_mu2, gauss_params_mu2); caffe_gpu_clip_upper(S*F*G, mu1_upper_limit, gauss_params_mu1, gauss_params_mu1); caffe_gpu_clip_upper(S*F*G, mu2_upper_limit, gauss_params_mu2, gauss_params_mu2); // 0. precompute sigma^2, sigma^3 and (sigma^2)/2 conv_gauss_precompute_sigma_kernel<Dtype><<<CUDA_GET_BLOCKS(S*G*F), CUDA_NUM_THREADS>>>(S*G*F, gauss_params_sigma, gauss_params_sigma_square_inv, gauss_params_sigma_cube_inv, gauss_params_sigma_square_inv_half, this->sigma_lower_bound); // 1. for each pixel in [SxGxF] x [K_w x K_h] compute G (Gauss distribution), dG/dx, dG/dy, dG/dsigma // cuda dimension X runs over K_w, Y over K_h and dimension Z over all filters // we translate cuda thread X,Y dimensions directly to filter indexces of size K_w, K_h and assign cuda thread Z dimension with // several filters to fill as many CAFFE_CUDA_NUM_THREADS threads available (i.e. 
multiple filter can be processed in one cuda block) dim3 threadsPerBlock(K_w, K_h, CUDA_NUM_THREADS/(K_w * K_h)); dim3 numBlocks(1, 1, (S*G*F + threadsPerBlock.z - 1) / threadsPerBlock.z); Dtype* gauss_dist = this->kernels_temp(GAUSS_DIST); size_t d_param_size = S * G* F* K_h * K_w; Dtype* deriv_weight = output.d_params() + 0 * d_param_size; Dtype* deriv_mu1 = output.d_params() + 1 * d_param_size; Dtype* deriv_mu2 = output.d_params() + 2 * d_param_size; Dtype* deriv_sigma = output.d_params() + 3 * d_param_size; conv_gauss_distributions_kernel<Dtype><<<numBlocks,threadsPerBlock>>>(S*G*F, K_w, K_h, this->offsets_already_centered, gauss_params_w, gauss_params_mu1, gauss_params_mu2, gauss_params_sigma_square_inv, gauss_params_sigma_cube_inv, gauss_params_sigma_square_inv_half, gauss_dist, deriv_mu1, deriv_mu2, deriv_sigma); // 2. for each filter (G, dG/dx, dG/dy, dG/dsigma) calculate sums (use different sums if using normalization by square sum) Dtype* guass_norm = this->param_temp(GAUSS_NORM); Dtype* deriv_mu1_sums = this->param_temp(DERIV_MU1_SUMS); Dtype* deriv_mu2_sums = this->param_temp(DERIV_MU2_SUMS); Dtype* deriv_sigma_sums = this->param_temp(DERIV_SIGMA_SUMS); // TODO: all three sums can be done in parallel, do we need seperate streams to make this run in parallel ? if (this->use_unit_normalization == false) { // if there is no normalization then there should be no derivative of normalization caffe_gpu_set((S*F*G), (Dtype)0, deriv_mu1_sums); caffe_gpu_set((S*F*G), (Dtype)0, deriv_mu2_sums); caffe_gpu_set((S*F*G), (Dtype)0, deriv_sigma_sums); } else if (this->use_square_unit_normalization) { // when using square gauss normalization derivatives dG/dx, dG/dy, dG/dsigma need to be multiplied by un-weighted, un-normalized gaussian dstirubution i.e. 
gauss_dist Dtype* deriv_mu1_times_gauss_dist = this->kernels_temp(DERIV_MU1_TIMES_GAUSS_DIST); Dtype* deriv_mu2_times_gauss_dist = this->kernels_temp(DERIV_MU2_TIMES_GAUSS_DIST); Dtype* deriv_sigma_times_gauss_dist = this->kernels_temp(DERIV_SIGMA_TIMES_GAUSS_DIST); caffe_gpu_mul((S*F*G) * (K_w*K_h), gauss_dist, deriv_mu1, deriv_mu1_times_gauss_dist); // deriv_mu1_times_gauss_dist = gauss_dist * deriv_mu1; caffe_gpu_mul((S*F*G) * (K_w*K_h), gauss_dist, deriv_mu2, deriv_mu2_times_gauss_dist); // deriv_mu2_times_gauss_dist = gauss_dist * deriv_mu2; caffe_gpu_mul((S*F*G) * (K_w*K_h), gauss_dist, deriv_sigma, deriv_sigma_times_gauss_dist); // deriv_sigma_times_gauss_dist = gauss_dist * deriv_sigma; caffe_gpu_sum((S*F*G) * (K_w*K_h), deriv_mu1_times_gauss_dist, deriv_mu1_sums, S*F*G, tmp_precomp_index_gpu); caffe_gpu_sum((S*F*G) * (K_w*K_h), deriv_mu2_times_gauss_dist, deriv_mu2_sums, S*F*G, tmp_precomp_index_gpu); caffe_gpu_sum((S*F*G) * (K_w*K_h), deriv_sigma_times_gauss_dist, deriv_sigma_sums, S*F*G, tmp_precomp_index_gpu); caffe_gpu_scal((S*F*G), (Dtype)2, deriv_mu1_sums, cublas_handle); caffe_gpu_scal((S*F*G), (Dtype)2, deriv_mu2_sums, cublas_handle); caffe_gpu_scal((S*F*G), (Dtype)2, deriv_sigma_sums, cublas_handle); } else { caffe_gpu_sum((S*F*G) * (K_w*K_h), deriv_mu1, deriv_mu1_sums, S*F*G, tmp_precomp_index_gpu); caffe_gpu_sum((S*F*G) * (K_w*K_h), deriv_mu2, deriv_mu2_sums, S*F*G, tmp_precomp_index_gpu); caffe_gpu_sum((S*F*G) * (K_w*K_h), deriv_sigma, deriv_sigma_sums, S*F*G, tmp_precomp_index_gpu); } if (this->use_unit_normalization == false) { // set guass_norm to 1 if we sould NOT normalize to sum of 1 caffe_gpu_set((S*F*G), (Dtype)1, guass_norm); } else if (this->use_square_unit_normalization) { // we need to normalize to sum of squares to 1 Dtype* gauss_dist_square = this->kernels_temp(GAUSS_DIST_SQUARE); caffe_gpu_mul((S*F*G) * (K_w*K_h), gauss_dist, gauss_dist, gauss_dist_square); // gauss_dist_square = gauss_dist * gauss_dist; caffe_gpu_sum((S*F*G) * (K_w*K_h), gauss_dist_square, guass_norm, S*F*G, tmp_precomp_index_gpu); } else { // we need to normalize to sum of 1 caffe_gpu_sum((S*F*G) * (K_w*K_h), gauss_dist, guass_norm, S*F*G, tmp_precomp_index_gpu); } // invert guass_norm i.e. guass_norm = 1/guass_norm inv_kernel<Dtype><<<CUDA_GET_BLOCKS(S*G*F), CUDA_NUM_THREADS>>>(S*G*F, guass_norm, guass_norm); // gauss_mu1_sum = abs(gauss_mu1_sum) > 1e-10 ? gauss_mu1_sum : 0; caffe_gpu_clip_eps(S*F*G, (Dtype)1e-10, deriv_mu1_sums, deriv_mu1_sums); caffe_gpu_clip_eps(S*F*G, (Dtype)1e-10, deriv_mu2_sums, deriv_mu2_sums); // 3. for each filter G and derivative filters dG/dx, dG/dy, dG/dsigma apply its normalization terms threadsPerBlock = dim3(K_w* K_h, CUDA_NUM_THREADS/(K_w * K_h)); numBlocks = dim3(1, (S*F*G + threadsPerBlock.y - 1) / threadsPerBlock.y); // deriv_weight = gauss_dist * guass_norm scal_kernel_batched<Dtype><<<numBlocks,threadsPerBlock>>>(K_w * K_h, guass_norm, gauss_dist, deriv_weight, S*F*G); // !after! 
weight and deriv_weight are computed we can add weight to guass_norm which will be used in remaining derivateives and main kernel caffe_gpu_mul(S*F*G, gauss_params_w, guass_norm, guass_norm); // guass_norm = gauss_params_w / guass_norm; // apply gauss normalization factors directly to sums to avoid additional call to scal_kernel_batched caffe_gpu_mul(S*F*G, guass_norm, deriv_mu1_sums, deriv_mu1_sums); // deriv_mu1_sums = deriv_mu1_sums * guass_norm; caffe_gpu_mul(S*F*G, guass_norm, deriv_mu2_sums, deriv_mu2_sums); // deriv_mu2_sums = deriv_mu2_sums * guass_norm; caffe_gpu_mul(S*F*G, guass_norm, deriv_sigma_sums, deriv_sigma_sums); // deriv_sigma_sums = deriv_sigma_sums * guass_norm; // create normalized derivative filters axpby_kernel_batched<Dtype><<<numBlocks,threadsPerBlock>>>(K_w * K_h, (Dtype)-1, deriv_mu1_sums, deriv_weight, guass_norm, deriv_mu1, S*F*G); axpby_kernel_batched<Dtype><<<numBlocks,threadsPerBlock>>>(K_w * K_h, (Dtype)-1, deriv_mu2_sums, deriv_weight, guass_norm, deriv_mu2, S*F*G); axpby_kernel_batched<Dtype><<<numBlocks,threadsPerBlock>>>(K_w * K_h, (Dtype)-1, deriv_sigma_sums, deriv_weight, guass_norm, deriv_sigma, S*F*G); // 4. calculate main kernel weights by applying gauss norm and weights, and suming over SxGxF kernels into FxS kernels (in correct order i.e. rearagning them at the same time) // gauss_dist = w/norm * gauss_dist (note, guass_norm should be w/norm) scal_kernel_batched<Dtype><<<numBlocks,threadsPerBlock>>>(K_w * K_h, guass_norm, gauss_dist, gauss_dist, S*F*G); threadsPerBlock = dim3(K_w*K_h, sqrt(CUDA_NUM_THREADS/(K_w * K_h) ), sqrt(CUDA_NUM_THREADS/(K_w * K_h) ) ); numBlocks = dim3(1, (S + threadsPerBlock.y - 1) / threadsPerBlock.y, (F + threadsPerBlock.z - 1) / threadsPerBlock.z); add_sorted_kernel<Dtype><<<numBlocks,threadsPerBlock>>>(S, G, F, K_w*K_h, gauss_dist, weight); // 4. calculate seperable filters (WILL NOT IMPLEMENET) // 5. 
create error kernel for back-propagation by reversing the kernel Dtype* deriv_error = output.d_error(); threadsPerBlock = dim3(K_w*K_h, sqrt(CUDA_NUM_THREADS/(K_w * K_h) ), sqrt(CUDA_NUM_THREADS/(K_w * K_h) ) ); numBlocks = dim3(1, (S + threadsPerBlock.y - 1) / threadsPerBlock.y, (F + threadsPerBlock.z - 1) / threadsPerBlock.z); mirror_kernel<Dtype><<<numBlocks,threadsPerBlock>>>(S, F, K_w*K_h, weight, deriv_error); //cudaDeviceSynchronize(); clock_t end_t = clock(); } template void BaseDAUKernelCompute<float>::get_kernels(BaseDAUKernelParams<float>& input, BaseDAUKernelOutput<float>& output, cublasHandle_t cublas_handle); template void BaseDAUKernelCompute<double>::get_kernels(BaseDAUKernelParams<double>& input, BaseDAUKernelOutput<double>& output, cublasHandle_t cublas_handle); template void BaseDAUConvLayer<double>::set_last_n_gauss_to_zero(double* array, int num_gauss_zero); template void BaseDAUConvLayer<float>::set_last_n_gauss_to_zero(float* array, int num_gauss_zero); template void BaseDAUConvLayer<double>::Forward_gpu(const double* bottom_data, const vector<int> bottom_shape, double* top_data, const vector<int> top_shape); template void BaseDAUConvLayer<float>::Forward_gpu(const float* bottom_data, const vector<int> bottom_shape, float* top_data, const vector<int> top_shape); template void BaseDAUConvLayer<double>::Backward_gpu(const double* top_data, const double* top_error, const vector<int>& top_shape, bool propagate_down, const double* bottom_data, double* bottom_error, const vector<int>& bottom_shape, const vector<bool>& params_propagate_down ); template void BaseDAUConvLayer<float>::Backward_gpu(const float* top_data, const float* top_error, const vector<int>& top_shape, bool propagate_down, const float* bottom_data, float* bottom_error, const vector<int>& bottom_shape, const vector<bool>& params_propagate_down ); } // namespace dau_conv_impl
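// ---------------------------------------------------------------------------
// A host-side reference for the mirror_kernel defined above (a verification
// sketch only, not part of the original file). It applies the same index
// transform, y[(s*F + f)*n + i] = x[(f*S + s)*n + (n - i - 1)]: each K_w*K_h
// filter is reversed and the S/F ordering is swapped, which produces the error
// kernels used for back-propagation.
template <typename Dtype>
void mirror_kernel_reference(const int S, const int F, const int n,
                             const Dtype* x, Dtype* y) {
    for (int f = 0; f < F; ++f)
        for (int s = 0; s < S; ++s)
            for (int i = 0; i < n; ++i)
                y[(s*F + f)*n + i] = x[(f*S + s)*n + (n - i - 1)];
}
// ---------------------------------------------------------------------------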
811f5704750855d4ad09da8a0e0aa9ad2ae8149a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pch.h"

__global__ void mmul(float *A, float *B, float *C, int m, int p, int q)
{
    // Compute each thread's global row and column index
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    float sum = 0.0;
    if (col < q && row < m)
    {
        for (int i = 0; i < p; i++)
        {
            sum += A[row*p+i] * B[i*q+col];
        }
        C[row*q+col] = sum;
    }
}

__host__ void gpuMult(float *h_A, float *h_B, float *gpu_C, const int m, const int p, const int q)
{
    // declare variables to be used by GPU (device) for matrix multiplication
    float *d_A, *d_B, *d_C;

    // Allocate device matrices on GPU using hipMalloc
    hipMalloc(&d_A, m*p*sizeof(float));
    hipMalloc(&d_B, p*q*sizeof(float));
    hipMalloc(&d_C, m*q*sizeof(float));
    cudaCheckErrors("hipMalloc failure");

    // Copy host matrices A and B to the device using hipMemcpy
    hipMemcpy(d_A, h_A, m*p*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, p*q*sizeof(float), hipMemcpyHostToDevice);
    cudaCheckErrors("hipMemcpy H2D failure");

    // calculate grid and block dimensions
    // Remember: the maximum number of threads per block is 1024.
    unsigned int block_size = BLOCK_SIZE; // from pch.h is 32
    dim3 block(block_size, block_size);

    // calculate grid dimensions here
    unsigned int grid_rows = (m + block_size - 1) / block_size;
    unsigned int grid_cols = (q + block_size - 1) / block_size;
    dim3 grid(grid_cols, grid_rows);

    printf("Kernel launch dimensions: \n");
    printf("\tGrid size : {%d, %d, %d} blocks.\n", grid.x, grid.y, grid.z);
    printf("\tBlock size : {%d, %d, %d} threads.\n", block.x, block.y, block.z);

    // carry out matrix multiplication on the GPU
    hipLaunchKernelGGL((mmul), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, m, p, q);

    // Synchronize the device
    hipDeviceSynchronize();

    // Transfer results from device to host
    hipMemcpy(gpu_C, d_C, sizeof(float)*m*q, hipMemcpyDeviceToHost);
    cudaCheckErrors("Kernel execution failure or hipMemcpy D2H failure");

    // Free the device matrices
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    cudaCheckErrors("hipFree failure");
}
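// ---------------------------------------------------------------------------
// Note: the hipified file above still calls cudaCheckErrors(), because hipify
// rewrites CUDA runtime calls but not user-defined macros coming from pch.h.
// pch.h is not part of this dump, so the macro below is only a guess at an
// equivalent HIP-side error check (hypothetical, shown for illustration):
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

#define hipCheckErrors(msg)                                                 \
    do {                                                                    \
        hipError_t __err = hipGetLastError();                               \
        if (__err != hipSuccess) {                                          \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n",              \
                    msg, hipGetErrorString(__err), __FILE__, __LINE__);     \
            exit(1);                                                        \
        }                                                                   \
    } while (0)
// ---------------------------------------------------------------------------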
811f5704750855d4ad09da8a0e0aa9ad2ae8149a.cu
#include "pch.h" __global__ void mmul( float *A, float *B, float *C, int m, int p, int q) { // Compute each thread's global row and column index int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0.0; if( col < q && row < m) { for(int i = 0; i < p; i++) { sum += A[row*p+i] * B[i*q+col]; } C[row*q+col] = sum; } } __host__ void gpuMult(float *h_A, float *h_B, float *gpu_C, const int m, const int p, const int q) { //declare variables to be used by GPU (device) for matrix multiplication float *d_A, *d_B, *d_C; // Allocate device matrices on GPU using cudaMalloc cudaMalloc(&d_A, m*p*sizeof(float)); cudaMalloc(&d_B, p*q*sizeof(float)); cudaMalloc(&d_C, m*q*sizeof(float)); cudaCheckErrors("cudaMalloc failure"); // Copy host matrices A and B to the device using cudaMemcpy cudaMemcpy(d_A, h_A, m*p*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, p*q*sizeof(float), cudaMemcpyHostToDevice); cudaCheckErrors("cudaMemcpy H2D failture"); //calculate grid and block dimensions // Remember: the maximum number of total threads is 1024. unsigned int block_size = BLOCK_SIZE; // from pch.h is 32 dim3 block(block_size, block_size); //calculate grid dimensions here unsigned int grid_rows = (m + block_size - 1) / block_size; unsigned int grid_cols = (q + block_size - 1) / block_size; dim3 grid(grid_cols, grid_rows); printf("Kernel launch dimensions: \n"); printf("\tGrid size : {%d, %d, %d} blocks.\n",grid.x, grid.y, grid.z); printf("\tBlock size : {%d, %d, %d} threads.\n",block.x, block.y, block.z); //carry out matrix multiplication on the GPUs mmul<<<grid,block>>>(d_A,d_B,d_C,m,p,q); // Synchronize the device cudaDeviceSynchronize(); // Transfer results from device to host cudaMemcpy(gpu_C, d_C, sizeof(float)*m*q, cudaMemcpyDeviceToHost); cudaCheckErrors("Kernel execution failure or cudaMemcpy H2D failure"); // Free the device matrices cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaCheckErrors("cudaFree failure"); }
4db0f8ef628bfac76044ce2ecf41142530645160.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/details/nan_inf_utils.h" #include "paddle/fluid/framework/details/nan_inf_utils_detail.h" #include <algorithm> #include <unordered_map> #include <utility> #include <vector> namespace paddle { namespace framework { namespace details { static std::once_flag init_multi_gpu_op_var_map_flag; // lazy init static std::vector<std::unordered_map<std::string, memory::AllocationPtr>>& multi_op_var2gpu_str() { static std::vector<std::unordered_map<std::string, memory::AllocationPtr>> _multi_op_var2gpu_str; return _multi_op_var2gpu_str; } static std::vector<std::mutex>& multi_op_var2gpu_str_mutex() { static std::vector<std::mutex> _multi_op_var2gpu_str_mutex; return _multi_op_var2gpu_str_mutex; } static void InitMultiGPUOpVarMap() { int dev_count = platform::GetCUDADeviceCount(); PADDLE_ENFORCE_GT(dev_count, 0, platform::errors::NotFound( "cuda device must > 0, now dev_count=%d", dev_count)); // https://stackoverflow.com/questions/16465633/how-can-i-use-something-like-stdvectorstdmutex std::vector<std::unordered_map<std::string, memory::AllocationPtr>> tmp_multi( dev_count); std::vector<std::mutex> tmp_multi_mutex(dev_count); multi_op_var2gpu_str().swap(tmp_multi); multi_op_var2gpu_str_mutex().swap(tmp_multi_mutex); } template <typename T> __device__ __forceinline__ void PrintNanInfKernel(const T* value, const size_t numel, int print_num, char* debug_info) { const size_t tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ unsigned int nan_count, inf_count, num_count; if (threadIdx.x == 0) nan_count = inf_count = num_count = 0; __syncthreads; for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) { unsigned int count = 0; if (isnan(value[i])) { count = atomicAdd(&nan_count, 1); } else if (isinf(value[i])) { count = atomicAdd(&inf_count, 1); } else { count = atomicAdd(&num_count, 1); } // for cuda, print in every block if (count < print_num) { printf("numel:%lu idx:%lu value:%f\n", static_cast<uint64_t>(numel), static_cast<uint64_t>(i), static_cast<float>(value[i])); } } __syncthreads; #ifdef __HIPCC__ if (true && hipThreadIdx_x == 0) { printf("In block %d, there has %u,%u,%u nan,inf,num\n", hipBlockIdx_x, nan_count, inf_count, num_count); #else if (true && threadIdx.x == 0) { printf("In block %d, there has %u,%u,%u nan,inf,num\n", blockIdx.x, nan_count, inf_count, num_count); #endif PADDLE_ENFORCE(false, "===ERROR: in %s find nan or inf===", debug_info); } } // Resnet 2gpus speed test, no check 270 images/s, this check 229 images/s template <typename T> __global__ void CheckNanInfKernel(const T* value, const size_t numel, int print_num, char* debug_info) { /// step 1, judge wheater has nan or inf __shared__ volatile int has_nan_inf; if (threadIdx.x == 0) has_nan_inf = false; __syncthreads(); const size_t tid = threadIdx.x + blockIdx.x * blockDim.x; T sum = 
static_cast<T>(0.0); // Todo(wangxi). simd speed up for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) { sum += (value[i] - value[i]); } if (isnan(sum) || isinf(sum)) has_nan_inf = true; __syncthreads(); /// Note. different blocks may behave differently if (!has_nan_inf) return; PrintNanInfKernel(value, numel, print_num, debug_info); } template <> template <typename T> void TensorCheckerVisitor<platform::CUDADeviceContext>::apply( typename std::enable_if< std::is_floating_point<T>::value || std::is_same<T, ::paddle::platform::complex<float>>::value || std::is_same<T, ::paddle::platform::complex<double>>::value>::type*) const { int print_num = 3; auto* dev_ctx = reinterpret_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get(tensor_.place())); int dev_id = BOOST_GET_CONST(platform::CUDAPlace, tensor_.place()).device; PADDLE_ENFORCE_EQ( (dev_id >= 0 && dev_id < multi_op_var2gpu_str_mutex().size()), true, platform::errors::OutOfRange("GPU dev_id must >=0 and < dev_count=%d", multi_op_var2gpu_str_mutex().size())); std::string op_var = "[op=" + op_type_ + "] [tensor=" + var_name_ + "]"; char* gpu_str_ptr = NULL; { auto& op_var2gpu_str_mutex = multi_op_var2gpu_str_mutex().at(dev_id); auto& op_var2gpu_str = multi_op_var2gpu_str().at(dev_id); std::lock_guard<std::mutex> guard(op_var2gpu_str_mutex); if (op_var2gpu_str.find(op_var) == op_var2gpu_str.end()) { // insert auto gpu_str_tensor = paddle::memory::Alloc(*dev_ctx, op_var.length() + 1); gpu_str_ptr = reinterpret_cast<char*>(gpu_str_tensor->ptr()); op_var2gpu_str.emplace(op_var, std::move(gpu_str_tensor)); auto iter = op_var2gpu_str.find(op_var); PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true, platform::errors::PreconditionNotMet( "op_var=%s should successed insert into " "op_var2gpu_str, but now failed", op_var)); #ifdef __HIPCC__ PADDLE_ENFORCE_CUDA_SUCCESS( hipMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1, hipMemcpyHostToDevice, dev_ctx->stream())); #else PADDLE_ENFORCE_CUDA_SUCCESS( hipMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1, hipMemcpyHostToDevice, dev_ctx->stream())); #endif } else { // get auto iter = op_var2gpu_str.find(op_var); PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true, platform::errors::PreconditionNotMet( "op_var=%s should be in the op_var2gpu_str, but " "now can't find it", op_var)); gpu_str_ptr = reinterpret_cast<char*>(iter->second->ptr()); } } #ifdef __HIPCC__ // HIP will throw GPU memory access fault if threads > 256 const size_t threads = 256; #else const size_t threads = 1024; #endif size_t blocks = ::min(static_cast<size_t>(128), static_cast<size_t>((tensor_.numel() + threads - 1) / threads)); #ifdef __HIPCC__ hipLaunchKernelGGL(CheckNanInfKernel, dim3(blocks), dim3(threads), 0, dev_ctx->stream(), tensor_.data<T>(), tensor_.numel(), print_num, gpu_str_ptr); #else hipLaunchKernelGGL(( CheckNanInfKernel), dim3(blocks), dim3(threads), 0, dev_ctx->stream(), tensor_.data<T>(), tensor_.numel(), print_num, gpu_str_ptr); #endif } template <> void tensor_check<platform::CUDADeviceContext>(const std::string& op_type, const std::string& var_name, const framework::Tensor& tensor, const platform::Place& place) { std::call_once(init_multi_gpu_op_var_map_flag, InitMultiGPUOpVarMap); TensorCheckerVisitor<platform::CUDADeviceContext> vistor(op_type, var_name, tensor, place); VisitDataType(tensor.type(), vistor); } } // namespace details } // namespace framework } // namespace paddle
4db0f8ef628bfac76044ce2ecf41142530645160.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/details/nan_inf_utils.h" #include "paddle/fluid/framework/details/nan_inf_utils_detail.h" #include <algorithm> #include <unordered_map> #include <utility> #include <vector> namespace paddle { namespace framework { namespace details { static std::once_flag init_multi_gpu_op_var_map_flag; // lazy init static std::vector<std::unordered_map<std::string, memory::AllocationPtr>>& multi_op_var2gpu_str() { static std::vector<std::unordered_map<std::string, memory::AllocationPtr>> _multi_op_var2gpu_str; return _multi_op_var2gpu_str; } static std::vector<std::mutex>& multi_op_var2gpu_str_mutex() { static std::vector<std::mutex> _multi_op_var2gpu_str_mutex; return _multi_op_var2gpu_str_mutex; } static void InitMultiGPUOpVarMap() { int dev_count = platform::GetCUDADeviceCount(); PADDLE_ENFORCE_GT(dev_count, 0, platform::errors::NotFound( "cuda device must > 0, now dev_count=%d", dev_count)); // https://stackoverflow.com/questions/16465633/how-can-i-use-something-like-stdvectorstdmutex std::vector<std::unordered_map<std::string, memory::AllocationPtr>> tmp_multi( dev_count); std::vector<std::mutex> tmp_multi_mutex(dev_count); multi_op_var2gpu_str().swap(tmp_multi); multi_op_var2gpu_str_mutex().swap(tmp_multi_mutex); } template <typename T> __device__ __forceinline__ void PrintNanInfKernel(const T* value, const size_t numel, int print_num, char* debug_info) { const size_t tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ unsigned int nan_count, inf_count, num_count; if (threadIdx.x == 0) nan_count = inf_count = num_count = 0; __syncthreads; for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) { unsigned int count = 0; if (isnan(value[i])) { count = atomicAdd(&nan_count, 1); } else if (isinf(value[i])) { count = atomicAdd(&inf_count, 1); } else { count = atomicAdd(&num_count, 1); } // for cuda, print in every block if (count < print_num) { printf("numel:%lu idx:%lu value:%f\n", static_cast<uint64_t>(numel), static_cast<uint64_t>(i), static_cast<float>(value[i])); } } __syncthreads; #ifdef __HIPCC__ if (true && hipThreadIdx_x == 0) { printf("In block %d, there has %u,%u,%u nan,inf,num\n", hipBlockIdx_x, nan_count, inf_count, num_count); #else if (true && threadIdx.x == 0) { printf("In block %d, there has %u,%u,%u nan,inf,num\n", blockIdx.x, nan_count, inf_count, num_count); #endif PADDLE_ENFORCE(false, "===ERROR: in %s find nan or inf===", debug_info); } } // Resnet 2gpus speed test, no check 270 images/s, this check 229 images/s template <typename T> __global__ void CheckNanInfKernel(const T* value, const size_t numel, int print_num, char* debug_info) { /// step 1, judge wheater has nan or inf __shared__ volatile int has_nan_inf; if (threadIdx.x == 0) has_nan_inf = false; __syncthreads(); const size_t tid = threadIdx.x + blockIdx.x * blockDim.x; T sum = static_cast<T>(0.0); // Todo(wangxi). 
simd speed up for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) { sum += (value[i] - value[i]); } if (isnan(sum) || isinf(sum)) has_nan_inf = true; __syncthreads(); /// Note. different blocks may behave differently if (!has_nan_inf) return; PrintNanInfKernel(value, numel, print_num, debug_info); } template <> template <typename T> void TensorCheckerVisitor<platform::CUDADeviceContext>::apply( typename std::enable_if< std::is_floating_point<T>::value || std::is_same<T, ::paddle::platform::complex<float>>::value || std::is_same<T, ::paddle::platform::complex<double>>::value>::type*) const { int print_num = 3; auto* dev_ctx = reinterpret_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get(tensor_.place())); int dev_id = BOOST_GET_CONST(platform::CUDAPlace, tensor_.place()).device; PADDLE_ENFORCE_EQ( (dev_id >= 0 && dev_id < multi_op_var2gpu_str_mutex().size()), true, platform::errors::OutOfRange("GPU dev_id must >=0 and < dev_count=%d", multi_op_var2gpu_str_mutex().size())); std::string op_var = "[op=" + op_type_ + "] [tensor=" + var_name_ + "]"; char* gpu_str_ptr = NULL; { auto& op_var2gpu_str_mutex = multi_op_var2gpu_str_mutex().at(dev_id); auto& op_var2gpu_str = multi_op_var2gpu_str().at(dev_id); std::lock_guard<std::mutex> guard(op_var2gpu_str_mutex); if (op_var2gpu_str.find(op_var) == op_var2gpu_str.end()) { // insert auto gpu_str_tensor = paddle::memory::Alloc(*dev_ctx, op_var.length() + 1); gpu_str_ptr = reinterpret_cast<char*>(gpu_str_tensor->ptr()); op_var2gpu_str.emplace(op_var, std::move(gpu_str_tensor)); auto iter = op_var2gpu_str.find(op_var); PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true, platform::errors::PreconditionNotMet( "op_var=%s should successed insert into " "op_var2gpu_str, but now failed", op_var)); #ifdef __HIPCC__ PADDLE_ENFORCE_CUDA_SUCCESS( hipMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1, hipMemcpyHostToDevice, dev_ctx->stream())); #else PADDLE_ENFORCE_CUDA_SUCCESS( cudaMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1, cudaMemcpyHostToDevice, dev_ctx->stream())); #endif } else { // get auto iter = op_var2gpu_str.find(op_var); PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true, platform::errors::PreconditionNotMet( "op_var=%s should be in the op_var2gpu_str, but " "now can't find it", op_var)); gpu_str_ptr = reinterpret_cast<char*>(iter->second->ptr()); } } #ifdef __HIPCC__ // HIP will throw GPU memory access fault if threads > 256 const size_t threads = 256; #else const size_t threads = 1024; #endif size_t blocks = std::min(static_cast<size_t>(128), static_cast<size_t>((tensor_.numel() + threads - 1) / threads)); #ifdef __HIPCC__ hipLaunchKernelGGL(CheckNanInfKernel, dim3(blocks), dim3(threads), 0, dev_ctx->stream(), tensor_.data<T>(), tensor_.numel(), print_num, gpu_str_ptr); #else CheckNanInfKernel<<<blocks, threads, 0, dev_ctx->stream()>>>( tensor_.data<T>(), tensor_.numel(), print_num, gpu_str_ptr); #endif } template <> void tensor_check<platform::CUDADeviceContext>(const std::string& op_type, const std::string& var_name, const framework::Tensor& tensor, const platform::Place& place) { std::call_once(init_multi_gpu_op_var_map_flag, InitMultiGPUOpVarMap); TensorCheckerVisitor<platform::CUDADeviceContext> vistor(op_type, var_name, tensor, place); VisitDataType(tensor.type(), vistor); } } // namespace details } // namespace framework } // namespace paddle
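CheckNanInfKernel in both files relies on a compact detection idiom: for finite x, x - x is 0, while any NaN or Inf element turns the accumulated per-thread sum into NaN, so a single isnan/isinf test on the sum detects a bad value anywhere in the tensor. A self-contained sketch of the same idiom outside the Paddle framework (all names here are illustrative):

#include <cstdio>

// Grid-stride reduction of (v - v): finite values contribute 0,
// any NaN/Inf poisons the per-thread sum.
__global__ void HasNanInfKernel(const float* value, size_t numel, int* found) {
  float sum = 0.0f;
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < numel;
       i += blockDim.x * gridDim.x) {
    sum += value[i] - value[i];
  }
  if (isnan(sum) || isinf(sum)) atomicExch(found, 1);
}

int main() {
  const size_t n = 1 << 20;
  float* d_x;
  int* d_found;
  int h_found = 0;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));  // all zeros: finite, so no flag expected
  cudaMalloc(&d_found, sizeof(int));
  cudaMemset(d_found, 0, sizeof(int));
  HasNanInfKernel<<<128, 256>>>(d_x, n, d_found);
  cudaMemcpy(&h_found, d_found, sizeof(int), cudaMemcpyDeviceToHost);
  printf("nan/inf found: %d\n", h_found);
  cudaFree(d_x);
  cudaFree(d_found);
  return 0;
}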
a681b00f3a060e122c66f661b08aeecea6500ffa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // @file pooling_gpu.cu // @brief Pooling block implementation (GPU) // @author Andrea Vedaldi // @author Karel Lenc /* Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ #include "pooling.hpp" #include "../datacu.hpp" #include <assert.h> #include <float.h> #include <sm_20_atomic_functions.h> /* ---------------------------------------------------------------- */ /* pooling_max_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void pooling_max_kernel (T* pooled, const T* data, const int pooledWidth, const int pooledHeight, const int pooledVolume, const int width, const int height, const int poolWidth, const int poolHeight, const int strideX, const int strideY, const int padLeft, const int padTop) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (pooledIndex < pooledVolume) { int px = pooledIndex ; int py = px / pooledWidth ; int pz = py / pooledHeight ; px %= pooledWidth ; py %= pooledHeight ; data += pz * (width*height) ; int x1 = px * strideX - padLeft ; int y1 = py * strideY - padTop ; int x2 = min(x1 + poolWidth, width) ; int y2 = min(y1 + poolHeight, height) ; x1 = max(x1, 0) ; y1 = max(y1, 0) ; T bestValue = data[y1 * width + x1] ; for (int y = y1 ; y < y2 ; ++y) { for (int x = x1 ; x < x2 ; ++x) { bestValue = max(bestValue, data[y * width + x]) ; } } pooled[pooledIndex] = bestValue ; } } /* ---------------------------------------------------------------- */ /* pooling_average_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void pooling_average_kernel (T* pooled, const T* data, const int pooledWidth, const int pooledHeight, const int pooledVolume, const int width, const int height, const int poolWidth, const int poolHeight, const int strideX, const int strideY, const int padLeft, const int padTop) { /* pooledIndex = x + y * pooledWidth + z * (pooledWidth * pooledHeight) */ int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (pooledIndex < pooledVolume) { int px = pooledIndex ; int py = px / pooledWidth ; int pz = py / pooledHeight ; px %= pooledWidth ; py %= pooledHeight ; int x1 = px * strideX - padLeft ; int y1 = py * strideY - padTop ; int x2 = min(x1 + poolWidth, width) ; int y2 = min(y1 + poolHeight, height) ; x1 = max(x1, 0) ; y1 = max(y1, 0) ; data += pz * (width*height) ; T accum = 0; T poolSize = (y2 - y1)*(x2 - x1); for (int y = y1 ; y < y2 ; ++y) { for (int x = x1 ; x < x2 ; ++x) { accum += data[y * width + x] ; } } pooled[pooledIndex] = accum / poolSize ; } } /* ---------------------------------------------------------------- */ /* pooling_max_backward */ /* ---------------------------------------------------------------- */ #ifdef VLNN_CAFFELIKE_BPPOOL // In order to be able to use this, BP would need to have access to both // bottom data and pooled data (currently only passed bottom data...) 
template <typename T> __global__ void pooling_max_backward_with_pooled_data (T* derData, const T* data, const T* pooled, const T* derPooled, const int nthreads, const int pooledWidth, const int pooledHeight, const int width, const int height, const int depth, const int poolWidth, const int poolHeight, const int strideX, const int strideY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { // find out the local index // find out the local offset int x = index % width; int y = (index / width) % height; int z = (index / width / height) % depth; int py1 = (y < poolHeight) ? 0 : (y - poolHeight) / strideY + 1; int py2 = min(y / strideY + 1, pooledHeight); int px1 = (x < poolWidth) ? 0 : (x - poolWidth) / strideX + 1; int px2 = min(x / strideX + 1, pooledWidth); T gradient = 0; T datum = data[(z * height + y) * width + x]; pooled += z * pooledHeight * pooledWidth; dzdy += z * pooledHeight * pooledWidth; for (int py = py1; py < py2; ++py) { for (int px = px1; px < px2; ++px) { gradient += dzdy[py * pooledWidth + px] * (datum == pooled[py * pooledWidth + px]); } } dzdx[index] = gradient; } } #endif #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else // an implementation of atomicAdd() for double (really slow) for older CC static __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif template<typename T> __global__ void pooling_max_backward_kernel (T* derData, const T* data, const T* derPooled, const int pooledWidth, const int pooledHeight, const int pooledVolume, const int width, const int height, const int poolWidth, const int poolHeight, const int strideX, const int strideY, const int padLeft, const int padTop) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (pooledIndex < pooledVolume) { int px = pooledIndex ; int py = px / pooledWidth ; int pz = py / pooledHeight ; px %= pooledWidth ; py %= pooledHeight ; data += pz * (width*height) ; derData += pz * (width*height) ; int x1 = px * strideX - padLeft ; int y1 = py * strideY - padTop ; int x2 = min(x1 + poolWidth, width) ; int y2 = min(y1 + poolHeight, height) ; x1 = max(x1, 0) ; y1 = max(y1, 0) ; int bestIndex = y1 * width + x1 ; T bestValue = data[bestIndex] ; for (int y = y1 ; y < y2 ; ++y) { for (int x = x1 ; x < x2 ; ++x) { int index = y * width + x ; T value = data[index] ; if (value > bestValue) { bestValue = value ; bestIndex = index ; } } } /* This is bad, but required to eliminate a race condition when writing to bottom_diff. Caffe goes the other way around, but requrires remembering the layer output, or the maximal indexes. 
atomicAdd(add, val) */ atomicAdd(derData + bestIndex, derPooled[pooledIndex]) ; } } /* ---------------------------------------------------------------- */ /* pooling_average_backward */ /* ---------------------------------------------------------------- */ template <typename T> __global__ void pooling_average_backward_kernel(T* derData, const T* derPooled, const int nthreads, const int pooledWidth, const int pooledHeight, const int width, const int height, const int depth, const int poolWidth, const int poolHeight, const int strideX, const int strideY, const int padLeft, const int padTop) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { /* To understand the logic of this piece of code see the comments to of the row2im backward kernel */ int x_data = index ; int y_data = x_data / width ; int z = y_data / height ; x_data %= width ; y_data %= height ; int dx = x_data + padLeft - poolWidth ; int dy = y_data + padTop - poolHeight ; int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ; int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ; int px2 = min((x_data + padLeft) / strideX, pooledWidth - 1) ; int py2 = min((y_data + padTop) / strideY, pooledHeight - 1) ; T accumulator = 0 ; derPooled += z * pooledHeight * pooledWidth; for (int py = py1 ; py <= py2 ; ++py) { for (int px = px1 ; px <= px2 ; ++px) { int x1 = px * strideX - padLeft ; int y1 = py * strideY - padTop ; int x2 = min(x1 + poolWidth, width) ; int y2 = min(y1 + poolHeight, height) ; x1 = max(x1, 0) ; y1 = max(y1, 0) ; T poolSize = (y2 - y1) * (x2 - x1); accumulator += derPooled[py * pooledWidth + px] / poolSize ; } } derData[index] = accumulator ; } } /* ---------------------------------------------------------------- */ /* Interface */ /* ---------------------------------------------------------------- */ namespace vl { namespace impl { template <typename type> struct pooling_max<vl::VLDT_GPU, type> { static vl::ErrorCode forward(type* pooled, type const* data, size_t height, size_t width, size_t depth, size_t poolHeight, size_t poolWidth, size_t strideY, size_t strideX, size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) { int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ; int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ; int pooledVolume = pooledWidth * pooledHeight * depth ; hipLaunchKernelGGL(( pooling_max_kernel<type>) , dim3(divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, pooled, data, pooledHeight, pooledWidth, pooledVolume, height, width, poolHeight, poolWidth, strideY, strideX, padTop, padLeft); hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? 
vl::VLE_Success : vl::VLE_Cuda ; } static vl::ErrorCode backward(type* derData, type const* data, type const* derOutput, size_t height, size_t width, size_t depth, size_t poolHeight, size_t poolWidth, size_t strideY, size_t strideX, size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) { int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ; int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ; int pooledVolume = pooledWidth * pooledHeight * depth ; hipLaunchKernelGGL(( pooling_max_backward_kernel<type>) , dim3(divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, derData, data, derOutput, pooledHeight, pooledWidth, pooledVolume, height, width, poolHeight, poolWidth, strideY, strideX, padTop, padLeft); hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } } ; // pooling_max template <typename type> struct pooling_average<vl::VLDT_GPU, type> { static vl::ErrorCode forward(type* pooled, type const* data, size_t height, size_t width, size_t depth, size_t poolHeight, size_t poolWidth, size_t strideY, size_t strideX, size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) { int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ; int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ; int pooledVolume = pooledWidth * pooledHeight * depth ; hipLaunchKernelGGL(( pooling_average_kernel<type>) , dim3(divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, pooled, data, pooledHeight, pooledWidth, pooledVolume, height, width, poolHeight, poolWidth, strideY, strideX, padTop, padLeft); hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } static vl::ErrorCode backward(type* derData, type const* derPooled, size_t height, size_t width, size_t depth, size_t poolHeight, size_t poolWidth, size_t strideY, size_t strideX, size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) { int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ; int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ; int dataVolume = width * height * depth ; hipLaunchKernelGGL(( pooling_average_backward_kernel<type>) , dim3(divideAndRoundUp(dataVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, derData, derPooled, dataVolume, pooledHeight, pooledWidth, height, width, dataVolume, poolHeight, poolWidth, strideY, strideX, padTop, padLeft); hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } } ; // pooling_average } } ; // namespace vl::impl // Instantiations template struct vl::impl::pooling_max<vl::VLDT_GPU, float> ; template struct vl::impl::pooling_average<vl::VLDT_GPU, float> ; #ifdef ENABLE_DOUBLE template struct vl::impl::pooling_max<vl::VLDT_GPU, double> ; template struct vl::impl::pooling_average<vl::VLDT_GPU, double> ; #endif
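The forward/backward interfaces above size their outputs with the usual pooling arithmetic, pooledWidth = (width + padLeft + padRight - poolWidth)/strideX + 1 using integer (floor) division, and the same formula for the height. A small worked example of that formula (the concrete numbers are illustrative, not taken from the file):

#include <cstdio>

// Floor-division form of the pooled output size used in the interface code above.
static int pooledSize(int in, int pool, int stride, int padLo, int padHi) {
  return (in + padLo + padHi - pool) / stride + 1;
}

int main() {
  // A 224-wide input, 3-wide window, stride 2, one element of padding on each side.
  printf("%d\n", pooledSize(224, 3, 2, 1, 1));  // (224 + 2 - 3)/2 + 1 = 112
  // A 7-wide input pooled with a 7-wide window collapses to a single output.
  printf("%d\n", pooledSize(7, 7, 1, 0, 0));    // (7 - 7)/1 + 1 = 1
  return 0;
}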
a681b00f3a060e122c66f661b08aeecea6500ffa.cu
// @file pooling_gpu.cu // @brief Pooling block implementation (GPU) // @author Andrea Vedaldi // @author Karel Lenc /* Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ #include "pooling.hpp" #include "../datacu.hpp" #include <assert.h> #include <float.h> #include <sm_20_atomic_functions.h> /* ---------------------------------------------------------------- */ /* pooling_max_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void pooling_max_kernel (T* pooled, const T* data, const int pooledWidth, const int pooledHeight, const int pooledVolume, const int width, const int height, const int poolWidth, const int poolHeight, const int strideX, const int strideY, const int padLeft, const int padTop) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (pooledIndex < pooledVolume) { int px = pooledIndex ; int py = px / pooledWidth ; int pz = py / pooledHeight ; px %= pooledWidth ; py %= pooledHeight ; data += pz * (width*height) ; int x1 = px * strideX - padLeft ; int y1 = py * strideY - padTop ; int x2 = min(x1 + poolWidth, width) ; int y2 = min(y1 + poolHeight, height) ; x1 = max(x1, 0) ; y1 = max(y1, 0) ; T bestValue = data[y1 * width + x1] ; for (int y = y1 ; y < y2 ; ++y) { for (int x = x1 ; x < x2 ; ++x) { bestValue = max(bestValue, data[y * width + x]) ; } } pooled[pooledIndex] = bestValue ; } } /* ---------------------------------------------------------------- */ /* pooling_average_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void pooling_average_kernel (T* pooled, const T* data, const int pooledWidth, const int pooledHeight, const int pooledVolume, const int width, const int height, const int poolWidth, const int poolHeight, const int strideX, const int strideY, const int padLeft, const int padTop) { /* pooledIndex = x + y * pooledWidth + z * (pooledWidth * pooledHeight) */ int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (pooledIndex < pooledVolume) { int px = pooledIndex ; int py = px / pooledWidth ; int pz = py / pooledHeight ; px %= pooledWidth ; py %= pooledHeight ; int x1 = px * strideX - padLeft ; int y1 = py * strideY - padTop ; int x2 = min(x1 + poolWidth, width) ; int y2 = min(y1 + poolHeight, height) ; x1 = max(x1, 0) ; y1 = max(y1, 0) ; data += pz * (width*height) ; T accum = 0; T poolSize = (y2 - y1)*(x2 - x1); for (int y = y1 ; y < y2 ; ++y) { for (int x = x1 ; x < x2 ; ++x) { accum += data[y * width + x] ; } } pooled[pooledIndex] = accum / poolSize ; } } /* ---------------------------------------------------------------- */ /* pooling_max_backward */ /* ---------------------------------------------------------------- */ #ifdef VLNN_CAFFELIKE_BPPOOL // In order to be able to use this, BP would need to have access to both // bottom data and pooled data (currently only passed bottom data...) 
template <typename T> __global__ void pooling_max_backward_with_pooled_data (T* derData, const T* data, const T* pooled, const T* derPooled, const int nthreads, const int pooledWidth, const int pooledHeight, const int width, const int height, const int depth, const int poolWidth, const int poolHeight, const int strideX, const int strideY) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { // find out the local index // find out the local offset int x = index % width; int y = (index / width) % height; int z = (index / width / height) % depth; int py1 = (y < poolHeight) ? 0 : (y - poolHeight) / strideY + 1; int py2 = min(y / strideY + 1, pooledHeight); int px1 = (x < poolWidth) ? 0 : (x - poolWidth) / strideX + 1; int px2 = min(x / strideX + 1, pooledWidth); T gradient = 0; T datum = data[(z * height + y) * width + x]; pooled += z * pooledHeight * pooledWidth; dzdy += z * pooledHeight * pooledWidth; for (int py = py1; py < py2; ++py) { for (int px = px1; px < px2; ++px) { gradient += dzdy[py * pooledWidth + px] * (datum == pooled[py * pooledWidth + px]); } } dzdx[index] = gradient; } } #endif #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else // an implementation of atomicAdd() for double (really slow) for older CC static __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif template<typename T> __global__ void pooling_max_backward_kernel (T* derData, const T* data, const T* derPooled, const int pooledWidth, const int pooledHeight, const int pooledVolume, const int width, const int height, const int poolWidth, const int poolHeight, const int strideX, const int strideY, const int padLeft, const int padTop) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (pooledIndex < pooledVolume) { int px = pooledIndex ; int py = px / pooledWidth ; int pz = py / pooledHeight ; px %= pooledWidth ; py %= pooledHeight ; data += pz * (width*height) ; derData += pz * (width*height) ; int x1 = px * strideX - padLeft ; int y1 = py * strideY - padTop ; int x2 = min(x1 + poolWidth, width) ; int y2 = min(y1 + poolHeight, height) ; x1 = max(x1, 0) ; y1 = max(y1, 0) ; int bestIndex = y1 * width + x1 ; T bestValue = data[bestIndex] ; for (int y = y1 ; y < y2 ; ++y) { for (int x = x1 ; x < x2 ; ++x) { int index = y * width + x ; T value = data[index] ; if (value > bestValue) { bestValue = value ; bestIndex = index ; } } } /* This is bad, but required to eliminate a race condition when writing to bottom_diff. Caffe goes the other way around, but requrires remembering the layer output, or the maximal indexes. 
atomicAdd(add, val) */ atomicAdd(derData + bestIndex, derPooled[pooledIndex]) ; } } /* ---------------------------------------------------------------- */ /* pooling_average_backward */ /* ---------------------------------------------------------------- */ template <typename T> __global__ void pooling_average_backward_kernel(T* derData, const T* derPooled, const int nthreads, const int pooledWidth, const int pooledHeight, const int width, const int height, const int depth, const int poolWidth, const int poolHeight, const int strideX, const int strideY, const int padLeft, const int padTop) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { /* To understand the logic of this piece of code see the comments to of the row2im backward kernel */ int x_data = index ; int y_data = x_data / width ; int z = y_data / height ; x_data %= width ; y_data %= height ; int dx = x_data + padLeft - poolWidth ; int dy = y_data + padTop - poolHeight ; int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ; int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ; int px2 = min((x_data + padLeft) / strideX, pooledWidth - 1) ; int py2 = min((y_data + padTop) / strideY, pooledHeight - 1) ; T accumulator = 0 ; derPooled += z * pooledHeight * pooledWidth; for (int py = py1 ; py <= py2 ; ++py) { for (int px = px1 ; px <= px2 ; ++px) { int x1 = px * strideX - padLeft ; int y1 = py * strideY - padTop ; int x2 = min(x1 + poolWidth, width) ; int y2 = min(y1 + poolHeight, height) ; x1 = max(x1, 0) ; y1 = max(y1, 0) ; T poolSize = (y2 - y1) * (x2 - x1); accumulator += derPooled[py * pooledWidth + px] / poolSize ; } } derData[index] = accumulator ; } } /* ---------------------------------------------------------------- */ /* Interface */ /* ---------------------------------------------------------------- */ namespace vl { namespace impl { template <typename type> struct pooling_max<vl::VLDT_GPU, type> { static vl::ErrorCode forward(type* pooled, type const* data, size_t height, size_t width, size_t depth, size_t poolHeight, size_t poolWidth, size_t strideY, size_t strideX, size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) { int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ; int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ; int pooledVolume = pooledWidth * pooledHeight * depth ; pooling_max_kernel<type> <<< divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (pooled, data, pooledHeight, pooledWidth, pooledVolume, height, width, poolHeight, poolWidth, strideY, strideX, padTop, padLeft); cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } static vl::ErrorCode backward(type* derData, type const* data, type const* derOutput, size_t height, size_t width, size_t depth, size_t poolHeight, size_t poolWidth, size_t strideY, size_t strideX, size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) { int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ; int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ; int pooledVolume = pooledWidth * pooledHeight * depth ; pooling_max_backward_kernel<type> <<< divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (derData, data, derOutput, pooledHeight, pooledWidth, pooledVolume, height, width, poolHeight, poolWidth, strideY, strideX, padTop, padLeft); cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? 
vl::VLE_Success : vl::VLE_Cuda ; } } ; // pooling_max template <typename type> struct pooling_average<vl::VLDT_GPU, type> { static vl::ErrorCode forward(type* pooled, type const* data, size_t height, size_t width, size_t depth, size_t poolHeight, size_t poolWidth, size_t strideY, size_t strideX, size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) { int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ; int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ; int pooledVolume = pooledWidth * pooledHeight * depth ; pooling_average_kernel<type> <<< divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (pooled, data, pooledHeight, pooledWidth, pooledVolume, height, width, poolHeight, poolWidth, strideY, strideX, padTop, padLeft); cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } static vl::ErrorCode backward(type* derData, type const* derPooled, size_t height, size_t width, size_t depth, size_t poolHeight, size_t poolWidth, size_t strideY, size_t strideX, size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) { int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ; int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ; int dataVolume = width * height * depth ; pooling_average_backward_kernel<type> <<< divideAndRoundUp(dataVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (derData, derPooled, dataVolume, pooledHeight, pooledWidth, height, width, dataVolume, poolHeight, poolWidth, strideY, strideX, padTop, padLeft); cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } } ; // pooling_average } } ; // namespace vl::impl // Instantiations template struct vl::impl::pooling_max<vl::VLDT_GPU, float> ; template struct vl::impl::pooling_average<vl::VLDT_GPU, float> ; #ifdef ENABLE_DOUBLE template struct vl::impl::pooling_max<vl::VLDT_GPU, double> ; template struct vl::impl::pooling_average<vl::VLDT_GPU, double> ; #endif
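Apart from cudaPeekAtLastError/cudaSuccess becoming hipPeekAtLastError/hipSuccess, the systematic difference between this .cu file and the paired .hip version above is the launch syntax: hipify rewrites the triple-chevron launch into hipLaunchKernelGGL with explicit dim3 grid/block arguments plus shared-memory and stream slots. A minimal runnable illustration of the mapping (trivial kernel, illustrative names; the HIP form is shown as a comment since one translation unit targets one runtime):

#include <cstdio>

__global__ void scale(float* x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

int main() {
  const int n = 1024, threads = 256;
  const int blocks = (n + threads - 1) / threads;  // same role as divideAndRoundUp above

  float* d_x;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));

  // CUDA launch, as in the .cu sources:
  scale<<<blocks, threads>>>(d_x, 2.0f, n);

  // HIP launch after hipify, as in the .hip sources (0 = dynamic shared mem, 0 = default stream):
  //   hipLaunchKernelGGL(scale, dim3(blocks), dim3(threads), 0, 0, d_x, 2.0f, n);

  float x0 = -1.0f;
  cudaMemcpy(&x0, d_x, sizeof(float), cudaMemcpyDeviceToHost);
  printf("x[0] = %f\n", x0);  // 0.0: zeros scaled by 2
  cudaFree(d_x);
  return 0;
}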
52965711d767ed83783f408cada6542a7c0bf68c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../CUDA/CUDA_func.h" #include "ConvolutionLayer.h" using namespace std; namespace NN { namespace Layers { ConvolutionLayer::ConvolutionLayer(std::vector<int> dependencies, int width, int height, int depth, int neuron_width, int neuron_height, int layer_depth) { this->dependencies = dependencies; this->width = width; this->height = height; this->depth = depth; this->neuron_width = neuron_width; this->neuron_height = neuron_height; this->layer_depth = layer_depth; layer_width = width - (neuron_width - 1); layer_height = height - (neuron_height - 1); output_size = layer_width * layer_height * layer_depth; hipMallocManaged(&output, output_size * sizeof(float)); hipMallocManaged(&output_gradient, output_size * sizeof(float)); hipMemset(output, 0.0f, output_size * sizeof(float)); hipMemset(output_gradient, 0.0f, output_size * sizeof(float)); } void ConvolutionLayer::compute() { float * prev_layer_output = prev_layer->get_output_iterator(); /*int output_offset; for(int k = 0; k < layer_depth; k++) { for(int j = 0; j < layer_height; j++) { for(int i = 0; i < layer_width; i++) { output_offset = get_output_offset(i, j, k); output[output_offset] = 0; for(int kn = 0; kn < depth; kn++) { for(int jn = 0; jn < neuron_height; jn++) { for(int in = 0; in < neuron_width; in++) { output[output_offset] += parameters[get_parameters_offset(k, in, jn, kn)] * prev_layer_output[get_input_offset(i+in, j+jn, kn)]; } } } output[output_offset] += parameters[(k+1)*(neuron_width*neuron_height*depth + 1) - 1]; } } }*/ int input_size = width * height * depth; int layer_size = layer_height * layer_width; int neuron_size = neuron_width * neuron_height * depth; int blockSize = (output_size + 511) / 512; hipLaunchKernelGGL(( NN::CUDA::compute_conv_layer), dim3(blockSize), dim3(512) , 0, 0, prev_layer_output, parameters, output, width, height, depth, layer_width, layer_height, layer_depth, neuron_width, neuron_height, input_size, output_size, layer_size, neuron_size); hipDeviceSynchronize(); } void ConvolutionLayer::backpropagate() { float * prev_layer_output = prev_layer->get_output_iterator(); float * prev_layer_output_gradient = prev_layer->get_output_gradient_iterator(); /*int output_offset; for(int k = 0; k < layer_depth; k++) { for(int j = 0; j < layer_height; j++) { for(int i = 0; i < layer_width; i++) { output_offset = get_output_offset(i, j, k); for(int kn = 0; kn < depth; kn++) { for(int jn = 0; jn < neuron_height; jn++) { for(int in = 0; in < neuron_width; in++) { gradient[get_parameters_offset(k, in, jn, kn)] += output_gradient[output_offset] * prev_layer_output[get_input_offset(i+in, j+jn, kn)]; prev_layer_output_gradient[get_input_offset(i+in, j+jn, kn)] += output_gradient[output_offset] * parameters[get_parameters_offset(k, in, jn, kn)]; } } } gradient[(k+1)*(neuron_width*neuron_height*depth + 1) - 1] += output_gradient[output_offset]; } } }*/ int input_size = width * height * depth; int layer_size = layer_height * layer_width; int neuron_size = neuron_width * neuron_height * depth; int blockSize = (input_size + 511) / 512; hipLaunchKernelGGL(( NN::CUDA::backprop_conv_layer_input) , dim3(blockSize), dim3(512) , 0, 0, prev_layer_output, prev_layer_output_gradient, parameters, gradient, output_gradient, width, height, depth, layer_width, layer_height, layer_depth, neuron_width, neuron_height, input_size, output_size, layer_size, neuron_size); hipDeviceSynchronize(); blockSize = (layer_depth * depth + 511) / 512; 
hipLaunchKernelGGL(( NN::CUDA::backprop_conv_layer_weights) , dim3(blockSize), dim3(512) , 0, 0, prev_layer_output, prev_layer_output_gradient, parameters, gradient, output_gradient, width, height, depth, layer_width, layer_height, layer_depth, neuron_width, neuron_height, input_size, output_size, layer_size, neuron_size); hipDeviceSynchronize(); blockSize = (layer_depth + 511) / 512; hipLaunchKernelGGL(( NN::CUDA::backprop_conv_layer_bias) , dim3(blockSize), dim3(512) , 0, 0, prev_layer_output, prev_layer_output_gradient, parameters, gradient, output_gradient, width, height, depth, layer_width, layer_height, layer_depth, neuron_width, neuron_height, input_size, output_size, layer_size, neuron_size); hipDeviceSynchronize(); } int ConvolutionLayer::get_parameters_size() { return layer_depth*(neuron_height*neuron_width*depth + 1); } void ConvolutionLayer::update_dependencies(std::vector<NN::Layers::Layer *> layer_dependencies) { prev_layer = layer_dependencies[0]; } void ConvolutionLayer::save(NN::File& file) { int id = 2; file.save(id); save_dependencies(file); file.save(width); file.save(height); file.save(depth); file.save(neuron_width); file.save(neuron_height); file.save(layer_depth); } void ConvolutionLayer::load(NN::File& file) { load_dependencies(file); file.load(width); file.load(height); file.load(depth); file.load(neuron_width); file.load(neuron_height); file.load(layer_depth); layer_width = width - (neuron_width - 1); layer_height = height - (neuron_height - 1); output_size = layer_width * layer_height * layer_depth; hipMallocManaged(&output, output_size * sizeof(float)); hipMallocManaged(&output_gradient, output_size * sizeof(float)); hipMemset(output, 0.0f, output_size * sizeof(float)); hipMemset(output_gradient, 0.0f, output_size * sizeof(float)); } ConvolutionLayer::~ConvolutionLayer() = default; int ConvolutionLayer::get_input_offset(int i, int j, int k) { return (k*height + j)*width + i; } int ConvolutionLayer::get_output_offset(int i, int j, int k) { return (k*layer_height + j)*layer_width + i; } int ConvolutionLayer::get_parameters_offset(int current_depth, int i, int j, int k) { return ((current_depth*depth + k)*neuron_height + j)*neuron_width + i + current_depth; } } }
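In compute() and backpropagate() above, the variable named blockSize actually holds the number of blocks: every launch uses a fixed 512 threads per block, rounds the element count up with (N + 511) / 512, and then synchronizes the device before the host or the next launch touches the managed buffers. A minimal sketch of that launch-and-sync pattern with a stand-in kernel (illustrative names):

#include <cstdio>

__global__ void fill(float* out, float v, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = v;
}

int main() {
  const int output_size = 1000;                     // stand-in for the layer's output size
  const int threadsPerBlock = 512;
  const int numBlocks = (output_size + 511) / 512;  // == (output_size + threadsPerBlock - 1) / threadsPerBlock

  float* output = nullptr;
  cudaMallocManaged(&output, output_size * sizeof(float));  // managed allocation, as in the layer
  fill<<<numBlocks, threadsPerBlock>>>(output, 1.0f, output_size);
  cudaDeviceSynchronize();                          // required before the host reads the managed buffer
  printf("output[%d] = %f\n", output_size - 1, output[output_size - 1]);
  cudaFree(output);
  return 0;
}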
52965711d767ed83783f408cada6542a7c0bf68c.cu
#include "../CUDA/CUDA_func.h" #include "ConvolutionLayer.h" using namespace std; namespace NN { namespace Layers { ConvolutionLayer::ConvolutionLayer(std::vector<int> dependencies, int width, int height, int depth, int neuron_width, int neuron_height, int layer_depth) { this->dependencies = dependencies; this->width = width; this->height = height; this->depth = depth; this->neuron_width = neuron_width; this->neuron_height = neuron_height; this->layer_depth = layer_depth; layer_width = width - (neuron_width - 1); layer_height = height - (neuron_height - 1); output_size = layer_width * layer_height * layer_depth; cudaMallocManaged(&output, output_size * sizeof(float)); cudaMallocManaged(&output_gradient, output_size * sizeof(float)); cudaMemset(output, 0.0f, output_size * sizeof(float)); cudaMemset(output_gradient, 0.0f, output_size * sizeof(float)); } void ConvolutionLayer::compute() { float * prev_layer_output = prev_layer->get_output_iterator(); /*int output_offset; for(int k = 0; k < layer_depth; k++) { for(int j = 0; j < layer_height; j++) { for(int i = 0; i < layer_width; i++) { output_offset = get_output_offset(i, j, k); output[output_offset] = 0; for(int kn = 0; kn < depth; kn++) { for(int jn = 0; jn < neuron_height; jn++) { for(int in = 0; in < neuron_width; in++) { output[output_offset] += parameters[get_parameters_offset(k, in, jn, kn)] * prev_layer_output[get_input_offset(i+in, j+jn, kn)]; } } } output[output_offset] += parameters[(k+1)*(neuron_width*neuron_height*depth + 1) - 1]; } } }*/ int input_size = width * height * depth; int layer_size = layer_height * layer_width; int neuron_size = neuron_width * neuron_height * depth; int blockSize = (output_size + 511) / 512; NN::CUDA::compute_conv_layer<<<blockSize, 512 >>> (prev_layer_output, parameters, output, width, height, depth, layer_width, layer_height, layer_depth, neuron_width, neuron_height, input_size, output_size, layer_size, neuron_size); cudaDeviceSynchronize(); } void ConvolutionLayer::backpropagate() { float * prev_layer_output = prev_layer->get_output_iterator(); float * prev_layer_output_gradient = prev_layer->get_output_gradient_iterator(); /*int output_offset; for(int k = 0; k < layer_depth; k++) { for(int j = 0; j < layer_height; j++) { for(int i = 0; i < layer_width; i++) { output_offset = get_output_offset(i, j, k); for(int kn = 0; kn < depth; kn++) { for(int jn = 0; jn < neuron_height; jn++) { for(int in = 0; in < neuron_width; in++) { gradient[get_parameters_offset(k, in, jn, kn)] += output_gradient[output_offset] * prev_layer_output[get_input_offset(i+in, j+jn, kn)]; prev_layer_output_gradient[get_input_offset(i+in, j+jn, kn)] += output_gradient[output_offset] * parameters[get_parameters_offset(k, in, jn, kn)]; } } } gradient[(k+1)*(neuron_width*neuron_height*depth + 1) - 1] += output_gradient[output_offset]; } } }*/ int input_size = width * height * depth; int layer_size = layer_height * layer_width; int neuron_size = neuron_width * neuron_height * depth; int blockSize = (input_size + 511) / 512; NN::CUDA::backprop_conv_layer_input <<<blockSize, 512 >>> (prev_layer_output, prev_layer_output_gradient, parameters, gradient, output_gradient, width, height, depth, layer_width, layer_height, layer_depth, neuron_width, neuron_height, input_size, output_size, layer_size, neuron_size); cudaDeviceSynchronize(); blockSize = (layer_depth * depth + 511) / 512; NN::CUDA::backprop_conv_layer_weights <<<blockSize, 512 >>> (prev_layer_output, prev_layer_output_gradient, parameters, gradient, output_gradient, width, 
height, depth, layer_width, layer_height, layer_depth, neuron_width, neuron_height, input_size, output_size, layer_size, neuron_size); cudaDeviceSynchronize(); blockSize = (layer_depth + 511) / 512; NN::CUDA::backprop_conv_layer_bias <<<blockSize, 512 >>> (prev_layer_output, prev_layer_output_gradient, parameters, gradient, output_gradient, width, height, depth, layer_width, layer_height, layer_depth, neuron_width, neuron_height, input_size, output_size, layer_size, neuron_size); cudaDeviceSynchronize(); } int ConvolutionLayer::get_parameters_size() { return layer_depth*(neuron_height*neuron_width*depth + 1); } void ConvolutionLayer::update_dependencies(std::vector<NN::Layers::Layer *> layer_dependencies) { prev_layer = layer_dependencies[0]; } void ConvolutionLayer::save(NN::File& file) { int id = 2; file.save(id); save_dependencies(file); file.save(width); file.save(height); file.save(depth); file.save(neuron_width); file.save(neuron_height); file.save(layer_depth); } void ConvolutionLayer::load(NN::File& file) { load_dependencies(file); file.load(width); file.load(height); file.load(depth); file.load(neuron_width); file.load(neuron_height); file.load(layer_depth); layer_width = width - (neuron_width - 1); layer_height = height - (neuron_height - 1); output_size = layer_width * layer_height * layer_depth; cudaMallocManaged(&output, output_size * sizeof(float)); cudaMallocManaged(&output_gradient, output_size * sizeof(float)); cudaMemset(output, 0.0f, output_size * sizeof(float)); cudaMemset(output_gradient, 0.0f, output_size * sizeof(float)); } ConvolutionLayer::~ConvolutionLayer() = default; int ConvolutionLayer::get_input_offset(int i, int j, int k) { return (k*height + j)*width + i; } int ConvolutionLayer::get_output_offset(int i, int j, int k) { return (k*layer_height + j)*layer_width + i; } int ConvolutionLayer::get_parameters_offset(int current_depth, int i, int j, int k) { return ((current_depth*depth + k)*neuron_height + j)*neuron_width + i + current_depth; } } }
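Beyond the launch syntax, the runtime API in this pair maps one-to-one under hipify: cudaMallocManaged -> hipMallocManaged, cudaMemset -> hipMemset, cudaDeviceSynchronize -> hipDeviceSynchronize. One quirk is carried over unchanged: cudaMemset/hipMemset take an int byte value, so the 0.0f passed above only works because it converts to the byte 0; clearing a buffer to a non-zero float would need a kernel or an explicit copy. A short standalone sketch of the allocation/clear pattern (CUDA form shown; the .hip file uses the hip* equivalents, and the size is illustrative):

#include <cstdio>

int main() {
  const int output_size = 8 * 8 * 16;  // illustrative size, not taken from the file
  float* output = nullptr;
  float* output_gradient = nullptr;

  cudaMallocManaged(&output, output_size * sizeof(float));
  cudaMallocManaged(&output_gradient, output_size * sizeof(float));
  cudaMemset(output, 0, output_size * sizeof(float));           // byte value 0: every float becomes 0.0f
  cudaMemset(output_gradient, 0, output_size * sizeof(float));

  cudaDeviceSynchronize();
  printf("output[0] = %f, gradient[0] = %f\n", output[0], output_gradient[0]);
  cudaFree(output);
  cudaFree(output_gradient);
  return 0;
}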
b1fe88ab39110eccdc47e09e31035381b247e9cf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include <algorithm> #include <cfloat> #include "caffe/layers/depthwise_ndconv_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe{ template<typename Dtype> __global__ void ConvForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int lenght, const int height, const int width, const int conved_lenght, const int conved_height, const int conved_width, const int kernel_l, const int kernel_h, const int kernel_w, const int stride_l, const int stride_h, const int stride_w, const int pad_l, const int pad_h, const int pad_w, Dtype * const top_data, const Dtype* const weight, const Dtype* const bias, const bool bias_term_){ CUDA_KERNEL_LOOP(index, nthreads) { const int pw=index % conved_width; const int ph=(index / conved_width) % conved_height; const int pl=(index / conved_width / conved_height) % conved_lenght; const int c=(index / conved_width / conved_height / conved_lenght) % channels; const int n=index / conved_width /conved_height / conved_lenght /channels; int lstart=pl*stride_l-pad_l; int hstart=ph*stride_h-pad_h; int wstart=pw*stride_w-pad_w; int lend=min(lstart+kernel_l, lenght); int hend=min(hstart+kernel_h, height); int wend=min(wstart+kernel_w, width); lstart=max(lstart,0); hstart=max(hstart,0); wstart=max(wstart,0); Dtype aveval=0; const Dtype* const bottom_slice=bottom_data+(n*channels+c)*lenght*height*width; const Dtype* const weight_slice=weight+c*kernel_l*kernel_h*kernel_w; int klstart=lend<kernel_l?kernel_l-lend:0; int khstart=hend<kernel_h?kernel_h-hend:0; int kwstart=wend<kernel_w?kernel_w-wend:0; for(int l=lstart;l<lend;++l){ for(int h=hstart;h<hend;++h){ for(int w=wstart;w<wend;++w){ aveval+=bottom_slice[(l*height+h)*width+w]*weight_slice[((klstart+l-lstart)*kernel_h+(khstart+h-hstart))*kernel_w+(kwstart+w-wstart)]; } } } if(bias_term_){ aveval+=bias[c]; } top_data[index]=aveval; } } template<typename Dtype> void DepthwiseNdConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){ const Dtype* weight = this->blobs_[0]->gpu_data(); for(int i=0;i<bottom.size();++i){ const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); const int count = top[i]->count(); vector<int> shape_=bottom[i]->shape(); const int channels_=shape_[1]; const int lenght_=shape_[2]; const int height_=shape_[3]; const int width_=shape_[4]; const int kernel_l_=this->kernel_shape_[0]; const int kernel_h_=this->kernel_shape_[1]; const int kernel_w_=this->kernel_shape_[2]; const int stride_l_=this->stride_shape_[0]; const int stride_h_=this->stride_shape_[1]; const int stride_w_=this->stride_shape_[2]; const int pad_l_=this->pad_shape_[0]; const int pad_h_=this->pad_shape_[1]; const int pad_w_=this->pad_shape_[2]; const int conved_lenght=this->output_shape_[2]; const int conved_height=this->output_shape_[3]; const int conved_width=this->output_shape_[4]; const bool bias_term_=this->bias_term_; if(bias_term_){ const Dtype* const bias=this->blobs_[1]->gpu_data(); hipLaunchKernelGGL(( ConvForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[i]->num(), channels_, lenght_, height_, width_, conved_lenght, conved_height, conved_width, kernel_l_, kernel_h_, kernel_w_, stride_l_, stride_h_, stride_w_, pad_l_, pad_h_, pad_w_, top_data, weight, bias, bias_term_); }else{ hipLaunchKernelGGL(( ConvForward<Dtype>), 
dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[i]->num(), channels_, lenght_, height_, width_, conved_lenght, conved_height, conved_width, kernel_l_, kernel_h_, kernel_w_, stride_l_, stride_h_, stride_w_, pad_l_, pad_h_, pad_w_, top_data, weight, 0, bias_term_); } } } template <typename Dtype> __global__ void ConvBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int lenght, const int height, const int width, const int conved_lenght, const int conved_height, const int conved_width, const int kernel_l, const int kernel_h, const int kernel_w, const int stride_l, const int stride_h, const int stride_w, const int pad_l, const int pad_h, const int pad_w, Dtype* const bottom_diff, const Dtype* const weight){ CUDA_KERNEL_LOOP(index,nthreads){ const int w=index % width+pad_w; const int h=(index / width) % height+pad_h; const int l=(index / width / height) % lenght+pad_l; const int c=(index / width / height / lenght) % channels; const int n=index / width / height /lenght / channels; const int plstart=(l<kernel_l)?0:(l-kernel_l)/stride_l+1; const int plend=min(l/stride_l+1,conved_lenght); const int phstart=(h<kernel_h)?0:(h-kernel_h)/stride_h+1; const int phend=min(h/stride_h+1,conved_height); const int pwstart=(w<kernel_w)?0:(w-kernel_w)/stride_w+1; const int pwend=min(w/stride_w+1,conved_width); const int klstart=(l>=kernel_l)?((l-kernel_l)%stride_l)+(kernel_l-stride_l):l; const int khstart=(h>=kernel_h)?((h-kernel_h)%stride_h)+(kernel_h-stride_h):h; const int kwstart=(w>=kernel_w)?((w-kernel_w)%stride_w)+(kernel_w-stride_w):w; Dtype gradient=0; const Dtype* const top_diff_slice=top_diff+(n*channels+c)*conved_lenght*conved_height*conved_width; const Dtype* const weight_slice=weight+c*kernel_l*kernel_h*kernel_w; for(int pl=plstart;pl<plend;++pl){ for(int ph=phstart;ph<phend;++ph){ for(int pw=pwstart;pw<pwend;++pw){ int kl=klstart-(pl-plstart)*stride_l; int kh=khstart-(ph-phstart)*stride_h; int kw=kwstart-(pw-pwstart)*stride_w; gradient+=top_diff_slice[(pl*conved_height+ph)*conved_width+pw]*weight_slice[((kl*kernel_h+kh)*kernel_w+kw)]; } } } bottom_diff[index]=gradient; } } __device__ float atomicAddme(float* address, float val) { return atomicAdd(address,val); } __device__ double atomicAddme(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #define DIVIDE_CEIL(a,b) a/b+((a/b*b)<a) template <typename Dtype> __global__ void ConvBackwardWeight(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int lenght, const int height, const int width, const int conved_lenght, const int conved_height, const int conved_width, const int kernel_l, const int kernel_h, const int kernel_w, const int stride_l, const int stride_h, const int stride_w, const int pad_l, const int pad_h, const int pad_w,Dtype* const weight_diff,const Dtype* const bottom_data){ CUDA_KERNEL_LOOP(index,nthreads){ const int kw=index % kernel_w; const int kh=(index / kernel_w) % kernel_h; const int kl=(index / kernel_w / kernel_h) % kernel_l; const int c =index/ kernel_w / kernel_h / kernel_l; Dtype gradient=0; for(int n=0;n<num;++n){ const Dtype* const 
top_diff_slice=top_diff+(n*channels+c)*conved_lenght*conved_height*conved_width; const Dtype* const bottom_data_slice=bottom_data+(n*channels+c)*lenght*height*width; const int plstart=max(DIVIDE_CEIL((pad_l-kl),stride_l),0); const int plend=min(DIVIDE_CEIL((lenght+pad_l-kl),stride_l),conved_lenght); const int phstart=max(DIVIDE_CEIL((pad_h-kh),stride_h),0); const int phend=min(DIVIDE_CEIL((height+pad_h-kh),stride_h),conved_height); const int pwstart=max(DIVIDE_CEIL((pad_w-kw),stride_w),0); const int pwend=min(DIVIDE_CEIL((width+pad_w-kw),stride_w),conved_width); for(int pl=plstart;pl<plend;++pl){ for(int ph=phstart;ph<phend;++ph){ for(int pw=pwstart;pw<pwend;++pw){ const int l=pl*stride_l+kl-pad_l; const int h=ph*stride_h+kh-pad_h; const int w=pw*stride_w+kw-pad_w; gradient+=top_diff_slice[(pl*conved_height+ph)*conved_width+pw]*bottom_data_slice[(l*height+h)*width+w]; } } } } weight_diff[c*kernel_l*kernel_h*kernel_w+(kl*kernel_h+kh)*kernel_w+kw]+=gradient; } } template <typename Dtype> __global__ void ConvBackwardBias(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int lenght, const int height, const int width, const int conved_lenght, const int conved_height, const int conved_width, const int kernel_l, const int kernel_h, const int kernel_w, const int stride_l, const int stride_h, const int stride_w, const int pad_l, const int pad_h, const int pad_w, Dtype* const bias_diff){ CUDA_KERNEL_LOOP(index,nthreads){ const int c=index; Dtype gradient=0; for(int n=0;n<num;n++){ const Dtype* const top_diff_slice=top_diff+(n*channels+c)*conved_lenght*conved_height*conved_width; for(int pl=0;pl<conved_lenght;++pl){ for(int ph=0;ph<conved_height;++ph){ for(int pw=0;pw<conved_width;++pw){ gradient+=top_diff_slice[(pl*conved_height+ph)*conved_width+pw]; } } } } bias_diff[c]+=gradient; } } template<typename Dtype> void DepthwiseNdConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight=this->blobs_[0]->gpu_data(); Dtype* weight_diff=this->blobs_[0]->mutable_gpu_diff(); const bool bias_term_ = this->bias_term_; Dtype* bias_diff = bias_term_ ? this->blobs_[1]->mutable_gpu_diff() : 0; const bool bias_propagate_down_ = this->param_propagate_down_[1]; const bool weight_propagate_down_ = this->param_propagate_down_[0]; const int kernel_l_=this->kernel_shape_[0]; const int kernel_h_=this->kernel_shape_[1]; const int kernel_w_=this->kernel_shape_[2]; const int stride_l_=this->stride_shape_[0]; const int stride_h_=this->stride_shape_[1]; const int stride_w_=this->stride_shape_[2]; const int pad_l_=this->pad_shape_[0]; const int pad_h_=this->pad_shape_[1]; const int pad_w_=this->pad_shape_[2]; const int conved_lenght=this->output_shape_[2]; const int conved_height=this->output_shape_[3]; const int conved_width=this->output_shape_[4]; for(int i=0;i<top.size();++i){ const Dtype* top_diff = top[i]->gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); vector<int> shape_=bottom[i]->shape(); const int channels_=shape_[1]; const int lenght_=shape_[2]; const int height_=shape_[3]; const int width_=shape_[4]; //Bias gradient,if necessary. 
if(bias_term_&&bias_propagate_down_){ const int count_bias=channels_; hipLaunchKernelGGL(( ConvBackwardBias<Dtype>), dim3(CAFFE_GET_BLOCKS(count_bias)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_bias, top_diff, bottom[i]->num(), channels_, lenght_, height_, width_, conved_lenght, conved_height, conved_width, kernel_l_, kernel_h_, kernel_w_, stride_l_, stride_h_, stride_w_, pad_l_, pad_h_, pad_w_, bias_diff); } // gradient w.r.t. weight. Note that we will accumulate diffs. if(weight_propagate_down_){ const int count_weight=channels_*kernel_l_*kernel_h_*kernel_w_; hipLaunchKernelGGL(( ConvBackwardWeight<Dtype>), dim3(CAFFE_GET_BLOCKS(count_weight)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_weight, top_diff, bottom[i]->num(), channels_, lenght_, height_, width_, conved_lenght, conved_height, conved_width, kernel_l_, kernel_h_, kernel_w_, stride_l_, stride_h_, stride_w_, pad_l_, pad_h_, pad_w_, weight_diff, bottom_data); } // gradient w.r.t. bottom data, if necessary. if(propagate_down[i]){ const int count_bottom=bottom[i]->count(); hipLaunchKernelGGL(( ConvBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count_bottom)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_bottom, top_diff, bottom[i]->num(), channels_, lenght_, height_, width_, conved_lenght, conved_height, conved_width, kernel_l_, kernel_h_, kernel_w_, stride_l_, stride_h_, stride_w_, pad_l_, pad_h_, pad_w_, bottom_diff, weight); } } } INSTANTIATE_LAYER_GPU_FUNCS (DepthwiseNdConvolutionLayer); }//namespace caffe
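The kernels above iterate with Caffe's CUDA_KERNEL_LOOP macro and size their grids with CAFFE_GET_BLOCKS / CAFFE_CUDA_NUM_THREADS. For reference, these are defined in stock Caffe roughly as below (reconstructed from memory of caffe/util/device_alternate.hpp; the exact thread count varies between forks, so treat the values as an assumption):

// Grid-stride loop over n elements: each thread starts at its global index
// and strides by the total number of launched threads.
#define CUDA_KERNEL_LOOP(i, n)                                 \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
       i += blockDim.x * gridDim.x)

// Threads per block, and the matching number of blocks for n work items.
const int CAFFE_CUDA_NUM_THREADS = 512;
inline int CAFFE_GET_BLOCKS(const int n) {
  return (n + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}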
b1fe88ab39110eccdc47e09e31035381b247e9cf.cu
#include <vector>
#include <algorithm>
#include <cfloat>

#include "caffe/layers/depthwise_ndconv_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

// Forward pass of the depthwise (channel-wise) N-D convolution.
// One thread per output element; the flat index decodes to (n, c, pl, ph, pw).
template <typename Dtype>
__global__ void ConvForward(const int nthreads,
    const Dtype* const bottom_data, const int num, const int channels,
    const int lenght, const int height, const int width,
    const int conved_lenght, const int conved_height, const int conved_width,
    const int kernel_l, const int kernel_h, const int kernel_w,
    const int stride_l, const int stride_h, const int stride_w,
    const int pad_l, const int pad_h, const int pad_w,
    Dtype* const top_data, const Dtype* const weight,
    const Dtype* const bias, const bool bias_term_) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int pw = index % conved_width;
    const int ph = (index / conved_width) % conved_height;
    const int pl = (index / conved_width / conved_height) % conved_lenght;
    const int c = (index / conved_width / conved_height / conved_lenght) % channels;
    const int n = index / conved_width / conved_height / conved_lenght / channels;
    // Receptive field of this output element, clipped to the input extent.
    int lstart = pl * stride_l - pad_l;
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    int lend = min(lstart + kernel_l, lenght);
    int hend = min(hstart + kernel_h, height);
    int wend = min(wstart + kernel_w, width);
    lstart = max(lstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    Dtype aveval = 0;
    const Dtype* const bottom_slice =
        bottom_data + (n * channels + c) * lenght * height * width;
    const Dtype* const weight_slice = weight + c * kernel_l * kernel_h * kernel_w;
    // Offsets into the kernel window when it was clipped by the near border.
    int klstart = lend < kernel_l ? kernel_l - lend : 0;
    int khstart = hend < kernel_h ? kernel_h - hend : 0;
    int kwstart = wend < kernel_w ? kernel_w - wend : 0;
    for (int l = lstart; l < lend; ++l) {
      for (int h = hstart; h < hend; ++h) {
        for (int w = wstart; w < wend; ++w) {
          aveval += bottom_slice[(l * height + h) * width + w]
              * weight_slice[((klstart + l - lstart) * kernel_h
                  + (khstart + h - hstart)) * kernel_w + (kwstart + w - wstart)];
        }
      }
    }
    if (bias_term_) {
      aveval += bias[c];
    }
    top_data[index] = aveval;
  }
}

template <typename Dtype>
void DepthwiseNdConvolutionLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const Dtype* weight = this->blobs_[0]->gpu_data();
  for (int i = 0; i < bottom.size(); ++i) {
    const Dtype* bottom_data = bottom[i]->gpu_data();
    Dtype* top_data = top[i]->mutable_gpu_data();
    const int count = top[i]->count();
    // Bottom blob is laid out as (num, channels, lenght, height, width).
    // shape(0) is used below instead of the legacy num() accessor, which
    // CHECK-fails on blobs with more than four axes.
    vector<int> shape_ = bottom[i]->shape();
    const int channels_ = shape_[1];
    const int lenght_ = shape_[2];
    const int height_ = shape_[3];
    const int width_ = shape_[4];
    const int kernel_l_ = this->kernel_shape_[0];
    const int kernel_h_ = this->kernel_shape_[1];
    const int kernel_w_ = this->kernel_shape_[2];
    const int stride_l_ = this->stride_shape_[0];
    const int stride_h_ = this->stride_shape_[1];
    const int stride_w_ = this->stride_shape_[2];
    const int pad_l_ = this->pad_shape_[0];
    const int pad_h_ = this->pad_shape_[1];
    const int pad_w_ = this->pad_shape_[2];
    const int conved_lenght = this->output_shape_[2];
    const int conved_height = this->output_shape_[3];
    const int conved_width = this->output_shape_[4];
    const bool bias_term_ = this->bias_term_;
    if (bias_term_) {
      const Dtype* const bias = this->blobs_[1]->gpu_data();
      ConvForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
          count, bottom_data, bottom[i]->shape(0), channels_,
          lenght_, height_, width_, conved_lenght, conved_height, conved_width,
          kernel_l_, kernel_h_, kernel_w_, stride_l_, stride_h_, stride_w_,
          pad_l_, pad_h_, pad_w_, top_data, weight, bias, bias_term_);
    } else {
      ConvForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
          count, bottom_data, bottom[i]->shape(0), channels_,
          lenght_, height_, width_, conved_lenght, conved_height, conved_width,
          kernel_l_, kernel_h_, kernel_w_, stride_l_, stride_h_, stride_w_,
          pad_l_, pad_h_, pad_w_, top_data, weight, 0, bias_term_);
    }
  }
}

// Gradient w.r.t. the bottom data: one thread per input element, summing the
// contributions of every output position whose window covers that element.
template <typename Dtype>
__global__ void ConvBackward(const int nthreads, const Dtype* const top_diff,
    const int num, const int channels,
    const int lenght, const int height, const int width,
    const int conved_lenght, const int conved_height, const int conved_width,
    const int kernel_l, const int kernel_h, const int kernel_w,
    const int stride_l, const int stride_h, const int stride_w,
    const int pad_l, const int pad_h, const int pad_w,
    Dtype* const bottom_diff, const Dtype* const weight) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int w = index % width + pad_w;
    const int h = (index / width) % height + pad_h;
    const int l = (index / width / height) % lenght + pad_l;
    const int c = (index / width / height / lenght) % channels;
    const int n = index / width / height / lenght / channels;
    // Range of output positions whose windows include this input element.
    const int plstart = (l < kernel_l) ? 0 : (l - kernel_l) / stride_l + 1;
    const int plend = min(l / stride_l + 1, conved_lenght);
    const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
    const int phend = min(h / stride_h + 1, conved_height);
    const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
    const int pwend = min(w / stride_w + 1, conved_width);
    // Kernel offsets paired with the first in-range output position; they
    // decrease by the stride as pl/ph/pw advance.
    const int klstart = (l >= kernel_l) ? ((l - kernel_l) % stride_l) + (kernel_l - stride_l) : l;
    const int khstart = (h >= kernel_h) ? ((h - kernel_h) % stride_h) + (kernel_h - stride_h) : h;
    const int kwstart = (w >= kernel_w) ? ((w - kernel_w) % stride_w) + (kernel_w - stride_w) : w;
    Dtype gradient = 0;
    const Dtype* const top_diff_slice =
        top_diff + (n * channels + c) * conved_lenght * conved_height * conved_width;
    const Dtype* const weight_slice = weight + c * kernel_l * kernel_h * kernel_w;
    for (int pl = plstart; pl < plend; ++pl) {
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          int kl = klstart - (pl - plstart) * stride_l;
          int kh = khstart - (ph - phstart) * stride_h;
          int kw = kwstart - (pw - pwstart) * stride_w;
          gradient += top_diff_slice[(pl * conved_height + ph) * conved_width + pw]
              * weight_slice[(kl * kernel_h + kh) * kernel_w + kw];
        }
      }
    }
    bottom_diff[index] = gradient;
  }
}

// atomicAdd helpers: float maps to the native atomicAdd, double falls back to
// a compare-and-swap loop. (Not referenced by the kernels in this file.)
__device__ float atomicAddme(float* address, float val) {
  return atomicAdd(address, val);
}

__device__ double atomicAddme(double* address, double val) {
  unsigned long long int* address_as_ull = (unsigned long long int*)address;
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
        __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);
  return __longlong_as_double(old);
}

// Ceiling division for positive divisors (arguments parenthesized so the macro
// is safe with compound expressions).
#define DIVIDE_CEIL(a, b) (((a) / (b)) + ((((a) / (b)) * (b)) < (a)))

// Gradient w.r.t. the weights: one thread per filter tap (c, kl, kh, kw),
// accumulating over the batch and every output position that uses the tap.
template <typename Dtype>
__global__ void ConvBackwardWeight(const int nthreads, const Dtype* const top_diff,
    const int num, const int channels,
    const int lenght, const int height, const int width,
    const int conved_lenght, const int conved_height, const int conved_width,
    const int kernel_l, const int kernel_h, const int kernel_w,
    const int stride_l, const int stride_h, const int stride_w,
    const int pad_l, const int pad_h, const int pad_w,
    Dtype* const weight_diff, const Dtype* const bottom_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int kw = index % kernel_w;
    const int kh = (index / kernel_w) % kernel_h;
    const int kl = (index / kernel_w / kernel_h) % kernel_l;
    const int c = index / kernel_w / kernel_h / kernel_l;
    Dtype gradient = 0;
    for (int n = 0; n < num; ++n) {
      const Dtype* const top_diff_slice =
          top_diff + (n * channels + c) * conved_lenght * conved_height * conved_width;
      const Dtype* const bottom_data_slice =
          bottom_data + (n * channels + c) * lenght * height * width;
      // Output positions for which this tap reads an in-bounds input element.
      const int plstart = max(DIVIDE_CEIL((pad_l - kl), stride_l), 0);
      const int plend = min(DIVIDE_CEIL((lenght + pad_l - kl), stride_l), conved_lenght);
      const int phstart = max(DIVIDE_CEIL((pad_h - kh), stride_h), 0);
      const int phend = min(DIVIDE_CEIL((height + pad_h - kh), stride_h), conved_height);
      const int pwstart = max(DIVIDE_CEIL((pad_w - kw), stride_w), 0);
      const int pwend = min(DIVIDE_CEIL((width + pad_w - kw), stride_w), conved_width);
      for (int pl = plstart; pl < plend; ++pl) {
        for (int ph = phstart; ph < phend; ++ph) {
          for (int pw = pwstart; pw < pwend; ++pw) {
            const int l = pl * stride_l + kl - pad_l;
            const int h = ph * stride_h + kh - pad_h;
            const int w = pw * stride_w + kw - pad_w;
            gradient += top_diff_slice[(pl * conved_height + ph) * conved_width + pw]
                * bottom_data_slice[(l * height + h) * width + w];
          }
        }
      }
    }
    // Each thread owns a distinct tap, so the plain accumulate is race-free.
    weight_diff[c * kernel_l * kernel_h * kernel_w + (kl * kernel_h + kh) * kernel_w + kw] += gradient;
  }
}

// Gradient w.r.t. the bias: one thread per channel sums the corresponding top
// diffs over the batch and all output positions.
template <typename Dtype>
__global__ void ConvBackwardBias(const int nthreads, const Dtype* const top_diff,
    const int num, const int channels,
    const int lenght, const int height, const int width,
    const int conved_lenght, const int conved_height, const int conved_width,
    const int kernel_l, const int kernel_h, const int kernel_w,
    const int stride_l, const int stride_h, const int stride_w,
    const int pad_l, const int pad_h, const int pad_w,
    Dtype* const bias_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int c = index;
    Dtype gradient = 0;
    for (int n = 0; n < num; n++) {
      const Dtype* const top_diff_slice =
          top_diff + (n * channels + c) * conved_lenght * conved_height * conved_width;
      for (int pl = 0; pl < conved_lenght; ++pl) {
        for (int ph = 0; ph < conved_height; ++ph) {
          for (int pw = 0; pw < conved_width; ++pw) {
            gradient += top_diff_slice[(pl * conved_height + ph) * conved_width + pw];
          }
        }
      }
    }
    bias_diff[c] += gradient;
  }
}

template <typename Dtype>
void DepthwiseNdConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* weight = this->blobs_[0]->gpu_data();
  Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
  const bool bias_term_ = this->bias_term_;
  Dtype* bias_diff = bias_term_ ? this->blobs_[1]->mutable_gpu_diff() : 0;
  // Only query the bias slot of param_propagate_down_ when a bias blob exists.
  const bool bias_propagate_down_ = bias_term_ && this->param_propagate_down_[1];
  const bool weight_propagate_down_ = this->param_propagate_down_[0];
  const int kernel_l_ = this->kernel_shape_[0];
  const int kernel_h_ = this->kernel_shape_[1];
  const int kernel_w_ = this->kernel_shape_[2];
  const int stride_l_ = this->stride_shape_[0];
  const int stride_h_ = this->stride_shape_[1];
  const int stride_w_ = this->stride_shape_[2];
  const int pad_l_ = this->pad_shape_[0];
  const int pad_h_ = this->pad_shape_[1];
  const int pad_w_ = this->pad_shape_[2];
  const int conved_lenght = this->output_shape_[2];
  const int conved_height = this->output_shape_[3];
  const int conved_width = this->output_shape_[4];
  for (int i = 0; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->gpu_diff();
    const Dtype* bottom_data = bottom[i]->gpu_data();
    Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
    vector<int> shape_ = bottom[i]->shape();
    const int channels_ = shape_[1];
    const int lenght_ = shape_[2];
    const int height_ = shape_[3];
    const int width_ = shape_[4];
    // Bias gradient, if necessary.
    if (bias_term_ && bias_propagate_down_) {
      const int count_bias = channels_;
      ConvBackwardBias<Dtype><<<CAFFE_GET_BLOCKS(count_bias), CAFFE_CUDA_NUM_THREADS>>>(
          count_bias, top_diff, bottom[i]->shape(0), channels_,
          lenght_, height_, width_, conved_lenght, conved_height, conved_width,
          kernel_l_, kernel_h_, kernel_w_, stride_l_, stride_h_, stride_w_,
          pad_l_, pad_h_, pad_w_, bias_diff);
    }
    // Gradient w.r.t. weight. Note that we will accumulate diffs.
    if (weight_propagate_down_) {
      const int count_weight = channels_ * kernel_l_ * kernel_h_ * kernel_w_;
      ConvBackwardWeight<Dtype><<<CAFFE_GET_BLOCKS(count_weight), CAFFE_CUDA_NUM_THREADS>>>(
          count_weight, top_diff, bottom[i]->shape(0), channels_,
          lenght_, height_, width_, conved_lenght, conved_height, conved_width,
          kernel_l_, kernel_h_, kernel_w_, stride_l_, stride_h_, stride_w_,
          pad_l_, pad_h_, pad_w_, weight_diff, bottom_data);
    }
    // Gradient w.r.t. bottom data, if necessary.
    if (propagate_down[i]) {
      const int count_bottom = bottom[i]->count();
      ConvBackward<Dtype><<<CAFFE_GET_BLOCKS(count_bottom), CAFFE_CUDA_NUM_THREADS>>>(
          count_bottom, top_diff, bottom[i]->shape(0), channels_,
          lenght_, height_, width_, conved_lenght, conved_height, conved_width,
          kernel_l_, kernel_h_, kernel_w_, stride_l_, stride_h_, stride_w_,
          pad_l_, pad_h_, pad_w_, bottom_diff, weight);
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(DepthwiseNdConvolutionLayer);

}  // namespace caffe
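// A minimal CPU reference sketch for the forward pass above, assuming float
// data, an N x C x L x H x W bottom layout and a C x KL x KH x KW weight
// layout. The function name and signature are illustrative only and are not
// part of the layer; whenever each input extent is at least the corresponding
// kernel extent, this zero-padded direct convolution should agree with
// ConvForward and can be used to spot-check the GPU output.
inline void depthwise_ndconv_forward_reference(
    const float* bottom, const float* weight, const float* bias, bool bias_term,
    int num, int channels, int length, int height, int width,
    int conved_length, int conved_height, int conved_width,
    int kernel_l, int kernel_h, int kernel_w,
    int stride_l, int stride_h, int stride_w,
    int pad_l, int pad_h, int pad_w,
    float* top) {
  for (int n = 0; n < num; ++n)
  for (int c = 0; c < channels; ++c)
  for (int pl = 0; pl < conved_length; ++pl)
  for (int ph = 0; ph < conved_height; ++ph)
  for (int pw = 0; pw < conved_width; ++pw) {
    float val = bias_term ? bias[c] : 0.f;
    for (int kl = 0; kl < kernel_l; ++kl)
    for (int kh = 0; kh < kernel_h; ++kh)
    for (int kw = 0; kw < kernel_w; ++kw) {
      // Input coordinate read by tap (kl, kh, kw) of this output position.
      const int l = pl * stride_l - pad_l + kl;
      const int h = ph * stride_h - pad_h + kh;
      const int w = pw * stride_w - pad_w + kw;
      if (l < 0 || l >= length || h < 0 || h >= height || w < 0 || w >= width) {
        continue;  // zero padding contributes nothing
      }
      const int bi = (((n * channels + c) * length + l) * height + h) * width + w;
      const int wi = ((c * kernel_l + kl) * kernel_h + kh) * kernel_w + kw;
      val += bottom[bi] * weight[wi];
    }
    const int ti = (((n * channels + c) * conved_length + pl) * conved_height + ph)
        * conved_width + pw;
    top[ti] = val;
  }
}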