Dataset columns and string length ranges:

Column          Type     Min length   Max length
hip_filename    string   5            84
hip_content     string   79           9.69M
cuda_filename   string   4            83
cuda_content    string   19           9.69M
c8a3f9cdf26510ade198e1712f784d48d8c9d8b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> #include<cstdio> #include<opencv2/core/core.hpp> #include<opencv2/highgui/highgui.hpp> #include<cuda_runtime.h> using std::cout; using std::endl; #define SQR(x) ((x)*(x)) #define POW2(x) SQR(x) #define POW3(x) ((x)*(x)*(x)) #define POW4(x) (POW2(x)*POW2(x)) #define POW7(x) (POW3(x)*POW3(x)*(x)) #define DegToRad(x) ((x)*M_PI/180) #define RadToDeg(x) ((x)/M_PI*180) static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number) { if(err!=hipSuccess) { fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,hipGetErrorString(err)); std::cin.get(); exit(EXIT_FAILURE); } } #define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__) // __device__ std::vector<unsigned char> rgb2lab(const unsigned char r,const unsigned char g,const unsigned char b){ // r = r / 255, // g = g / 255, // b = b / 255, // double x, y, z; // r = (r > 0.04045) ? Math.pow((r + 0.055) / 1.055, 2.4) : r / 12.92; // g = (g > 0.04045) ? Math.pow((g + 0.055) / 1.055, 2.4) : g / 12.92; // b = (b > 0.04045) ? Math.pow((b + 0.055) / 1.055, 2.4) : b / 12.92; // x = (r * 0.4124 + g * 0.3576 + b * 0.1805) / 0.95047; // y = (r * 0.2126 + g * 0.7152 + b * 0.0722) / 1.00000; // z = (r * 0.0193 + g * 0.1192 + b * 0.9505) / 1.08883; // x = (x > 0.008856) ? Math.pow(x, 1/3) : (7.787 * x) + 16/116; // y = (y > 0.008856) ? Math.pow(y, 1/3) : (7.787 * y) + 16/116; // z = (z > 0.008856) ? Math.pow(z, 1/3) : (7.787 * z) + 16/116; // std::vector<unsigned char> lab; // unsigned char l,a,bb; // l = (116 * y) - 16; // a = 500 * (x - y); // bb = 200 * (y - z); // lab.push_back(l); // lab.push_back(a); // lab.push_back(bb); // return lab; // } // __device__ double color_distance(const unsigned char l1,const unsigned char a1,const unsigned char b1, // const unsigned char l2,const unsigned char a2,const unsigned char b2){ // double eps = 1e-5; // double c1 = sqrtf(SQR(a1) + SQR(b1)); // double c2 = sqrtf(SQR(a2) + SQR(b2)); // double meanC = (c1 + c2) / 2.0; // double meanC7 = POW7(meanC); // double g = 0.5*(1 - sqrtf(meanC7 / (meanC7 + 6103515625.))); // 0.5*(1-sqrt(meanC^7/(meanC^7+25^7))) // double a1p = a1 * (1 + g); // double a2p = a2 * (1 + g); // c1 = sqrtf(SQR(a1p) + SQR(b1)); // c2 = sqrtf(SQR(a2p) + SQR(b2)); // double h1 = fmodf(atan2f(b1, a1p) + 2*M_PI, 2*M_PI); // double h2 = fmodf(atan2f(b2, a2p) + 2*M_PI, 2*M_PI); // // compute deltaL, deltaC, deltaH // double deltaL = l2 - l1; // double deltaC = c2 - c1; // double deltah; // if (c1*c2 < eps) { // deltah = 0; // } // if (std::abs(h2 - h1) <= M_PI) { // deltah = h2 - h1; // } // else if (h2 > h1) { // deltah = h2 - h1 - 2* M_PI; // } // else { // deltah = h2 - h1 + 2 * M_PI; // } // double deltaH = 2 * sqrtf(c1*c2)*sinf(deltah / 2); // // calculate CIEDE2000 // double meanL = (l1 + l2) / 2; // meanC = (c1 + c2) / 2.0; // meanC7 = POW7(meanC); // double meanH; // if (c1*c2 < eps) { // meanH = h1 + h2; // } // if (std::abs(h1 - h2) <= M_PI + eps) { // meanH = (h1 + h2) / 2; // } // else if (h1 + h2 < 2*M_PI) { // meanH = (h1 + h2 + 2*M_PI) / 2; // } // else { // meanH = (h1 + h2 - 2*M_PI) / 2; // } // double T = 1 // - 0.17*cosf(meanH - DegToRad(30)) // + 0.24*cosf(2 * meanH) // + 0.32*cosf(3 * meanH + DegToRad(6)) // - 0.2*cosf(4 * meanH - DegToRad(63)); // double sl = 1 + (0.015*SQR(meanL - 50)) / sqrtf(20 + SQR(meanL - 50)); // double sc = 1 + 0.045*meanC; // double sh = 1 + 
0.015*meanC*T; // double rc = 2 * sqrtf(meanC7 / (meanC7 + 6103515625.)); // double rt = -sinf(DegToRad(60 * expf(-SQR((RadToDeg(meanH) - 275) / 25)))) * rc; // double cur_dist = sqrtf(SQR(deltaL / sl) + SQR(deltaC / sc) + SQR(deltaH / sh) + rt * deltaC / sc * deltaH / sh); // return cur_dist; // } // __global__ void bgr_to_gray_kernel( unsigned char* input, // unsigned char* input1, // unsigned char* output, // int width, // int height, // int colorWidthStep, // int grayWidthStep) // { // //2D Index of current thread // const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; // const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; // int real_distance; // //Only valid threads perform memory I/O // if((xIndex<width) && (yIndex<height)) // { // //Location of colored pixel in input // bool valid = false; // int real_distance; // const int color_tid = yIndex * colorWidthStep + (3 * xIndex); // const unsigned char l1 = input[color_tid]; // const unsigned char a1 = input[color_tid + 1]; // const unsigned char b1 = input[color_tid + 2]; // //Location of gray pixel in output // const int gray_tid = yIndex * grayWidthStep + xIndex; // for(int i = -2; i <3;i++){ // int row = yIndex+i; // int col = xIndex+i; // if(row >= 0 && row <height && col >= 0 && col <width){ // const int color_tid_input1 = (row) * colorWidthStep + (3 * col); // const unsigned char l2 = input1[color_tid_input1]; // const unsigned char a2 = input1[color_tid_input1 + 1]; // const unsigned char b2 = input1[color_tid_input1 + 2]; // double cur_dist=color_distance(l1,a1,b1,l2,a2,b2); // if(cur_dist < 20){ // valid = true; // } // if(i==0){ // real_distance = cur_dist; // } // } // } // float gray; // if(valid){ // gray = 0; // }else{ // gray = 1; // } // output[gray_tid] = static_cast<unsigned char>(gray); // } // } // int *difffilter(const cv::Mat& input,const cv::Mat& input1, cv::Mat& output) // { // //Calculate total number of bytes of input and output image // const int colorBytes = input.step * input.rows; // const int grayBytes = output.step * output.rows; // unsigned char *d_input,*d_input1, *d_output; // //Allocate device memory // SAFE_CALL(hipMalloc<unsigned char>(&d_input,colorBytes),"CUDA Malloc Failed"); // SAFE_CALL(hipMalloc<unsigned char>(&d_input1,colorBytes),"CUDA Malloc Failed"); // SAFE_CALL(hipMalloc<unsigned char>(&d_output,grayBytes),"CUDA Malloc Failed"); // //Copy data from OpenCV input image to device memory // SAFE_CALL(hipMemcpy(d_input,input.ptr(),colorBytes,hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed"); // SAFE_CALL(hipMemcpy(d_input1,input1.ptr(),colorBytes,hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed"); // //Specify a reasonable block size // const dim3 block(16,16); // //Calculate grid size to cover the whole image // const dim3 grid((input.cols + block.x - 1)/block.x, (input.rows + block.y - 1)/block.y); // //Launch the color conversion kernel // bgr_to_gray_kernel<<<grid,block>>>(d_input,d_input1,d_output,input.cols,input.rows,input.step,output.step); // //Synchronize to check for any kernel launch errors // SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed"); // //Copy back data from destination device meory to OpenCV output image // SAFE_CALL(hipMemcpy(output.ptr(),d_output,grayBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Host To Device Failed"); // //Free the device memory // SAFE_CALL(hipFree(d_input),"CUDA Free Failed"); // SAFE_CALL(hipFree(d_input1),"CUDA Free Failed"); // SAFE_CALL(hipFree(d_output),"CUDA Free Failed"); // return 0; // }
c8a3f9cdf26510ade198e1712f784d48d8c9d8b8.cu
#include<iostream> #include<cstdio> #include<opencv2/core/core.hpp> #include<opencv2/highgui/highgui.hpp> #include<cuda_runtime.h> using std::cout; using std::endl; #define SQR(x) ((x)*(x)) #define POW2(x) SQR(x) #define POW3(x) ((x)*(x)*(x)) #define POW4(x) (POW2(x)*POW2(x)) #define POW7(x) (POW3(x)*POW3(x)*(x)) #define DegToRad(x) ((x)*M_PI/180) #define RadToDeg(x) ((x)/M_PI*180) static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number) { if(err!=cudaSuccess) { fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,cudaGetErrorString(err)); std::cin.get(); exit(EXIT_FAILURE); } } #define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__) // __device__ std::vector<unsigned char> rgb2lab(const unsigned char r,const unsigned char g,const unsigned char b){ // r = r / 255, // g = g / 255, // b = b / 255, // double x, y, z; // r = (r > 0.04045) ? Math.pow((r + 0.055) / 1.055, 2.4) : r / 12.92; // g = (g > 0.04045) ? Math.pow((g + 0.055) / 1.055, 2.4) : g / 12.92; // b = (b > 0.04045) ? Math.pow((b + 0.055) / 1.055, 2.4) : b / 12.92; // x = (r * 0.4124 + g * 0.3576 + b * 0.1805) / 0.95047; // y = (r * 0.2126 + g * 0.7152 + b * 0.0722) / 1.00000; // z = (r * 0.0193 + g * 0.1192 + b * 0.9505) / 1.08883; // x = (x > 0.008856) ? Math.pow(x, 1/3) : (7.787 * x) + 16/116; // y = (y > 0.008856) ? Math.pow(y, 1/3) : (7.787 * y) + 16/116; // z = (z > 0.008856) ? Math.pow(z, 1/3) : (7.787 * z) + 16/116; // std::vector<unsigned char> lab; // unsigned char l,a,bb; // l = (116 * y) - 16; // a = 500 * (x - y); // bb = 200 * (y - z); // lab.push_back(l); // lab.push_back(a); // lab.push_back(bb); // return lab; // } // __device__ double color_distance(const unsigned char l1,const unsigned char a1,const unsigned char b1, // const unsigned char l2,const unsigned char a2,const unsigned char b2){ // double eps = 1e-5; // double c1 = sqrtf(SQR(a1) + SQR(b1)); // double c2 = sqrtf(SQR(a2) + SQR(b2)); // double meanC = (c1 + c2) / 2.0; // double meanC7 = POW7(meanC); // double g = 0.5*(1 - sqrtf(meanC7 / (meanC7 + 6103515625.))); // 0.5*(1-sqrt(meanC^7/(meanC^7+25^7))) // double a1p = a1 * (1 + g); // double a2p = a2 * (1 + g); // c1 = sqrtf(SQR(a1p) + SQR(b1)); // c2 = sqrtf(SQR(a2p) + SQR(b2)); // double h1 = fmodf(atan2f(b1, a1p) + 2*M_PI, 2*M_PI); // double h2 = fmodf(atan2f(b2, a2p) + 2*M_PI, 2*M_PI); // // compute deltaL, deltaC, deltaH // double deltaL = l2 - l1; // double deltaC = c2 - c1; // double deltah; // if (c1*c2 < eps) { // deltah = 0; // } // if (std::abs(h2 - h1) <= M_PI) { // deltah = h2 - h1; // } // else if (h2 > h1) { // deltah = h2 - h1 - 2* M_PI; // } // else { // deltah = h2 - h1 + 2 * M_PI; // } // double deltaH = 2 * sqrtf(c1*c2)*sinf(deltah / 2); // // calculate CIEDE2000 // double meanL = (l1 + l2) / 2; // meanC = (c1 + c2) / 2.0; // meanC7 = POW7(meanC); // double meanH; // if (c1*c2 < eps) { // meanH = h1 + h2; // } // if (std::abs(h1 - h2) <= M_PI + eps) { // meanH = (h1 + h2) / 2; // } // else if (h1 + h2 < 2*M_PI) { // meanH = (h1 + h2 + 2*M_PI) / 2; // } // else { // meanH = (h1 + h2 - 2*M_PI) / 2; // } // double T = 1 // - 0.17*cosf(meanH - DegToRad(30)) // + 0.24*cosf(2 * meanH) // + 0.32*cosf(3 * meanH + DegToRad(6)) // - 0.2*cosf(4 * meanH - DegToRad(63)); // double sl = 1 + (0.015*SQR(meanL - 50)) / sqrtf(20 + SQR(meanL - 50)); // double sc = 1 + 0.045*meanC; // double sh = 1 + 0.015*meanC*T; // double rc = 2 * sqrtf(meanC7 / (meanC7 + 6103515625.)); // double rt = 
-sinf(DegToRad(60 * expf(-SQR((RadToDeg(meanH) - 275) / 25)))) * rc; // double cur_dist = sqrtf(SQR(deltaL / sl) + SQR(deltaC / sc) + SQR(deltaH / sh) + rt * deltaC / sc * deltaH / sh); // return cur_dist; // } // __global__ void bgr_to_gray_kernel( unsigned char* input, // unsigned char* input1, // unsigned char* output, // int width, // int height, // int colorWidthStep, // int grayWidthStep) // { // //2D Index of current thread // const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; // const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; // int real_distance; // //Only valid threads perform memory I/O // if((xIndex<width) && (yIndex<height)) // { // //Location of colored pixel in input // bool valid = false; // int real_distance; // const int color_tid = yIndex * colorWidthStep + (3 * xIndex); // const unsigned char l1 = input[color_tid]; // const unsigned char a1 = input[color_tid + 1]; // const unsigned char b1 = input[color_tid + 2]; // //Location of gray pixel in output // const int gray_tid = yIndex * grayWidthStep + xIndex; // for(int i = -2; i <3;i++){ // int row = yIndex+i; // int col = xIndex+i; // if(row >= 0 && row <height && col >= 0 && col <width){ // const int color_tid_input1 = (row) * colorWidthStep + (3 * col); // const unsigned char l2 = input1[color_tid_input1]; // const unsigned char a2 = input1[color_tid_input1 + 1]; // const unsigned char b2 = input1[color_tid_input1 + 2]; // double cur_dist=color_distance(l1,a1,b1,l2,a2,b2); // if(cur_dist < 20){ // valid = true; // } // if(i==0){ // real_distance = cur_dist; // } // } // } // float gray; // if(valid){ // gray = 0; // }else{ // gray = 1; // } // output[gray_tid] = static_cast<unsigned char>(gray); // } // } // int *difffilter(const cv::Mat& input,const cv::Mat& input1, cv::Mat& output) // { // //Calculate total number of bytes of input and output image // const int colorBytes = input.step * input.rows; // const int grayBytes = output.step * output.rows; // unsigned char *d_input,*d_input1, *d_output; // //Allocate device memory // SAFE_CALL(cudaMalloc<unsigned char>(&d_input,colorBytes),"CUDA Malloc Failed"); // SAFE_CALL(cudaMalloc<unsigned char>(&d_input1,colorBytes),"CUDA Malloc Failed"); // SAFE_CALL(cudaMalloc<unsigned char>(&d_output,grayBytes),"CUDA Malloc Failed"); // //Copy data from OpenCV input image to device memory // SAFE_CALL(cudaMemcpy(d_input,input.ptr(),colorBytes,cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed"); // SAFE_CALL(cudaMemcpy(d_input1,input1.ptr(),colorBytes,cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed"); // //Specify a reasonable block size // const dim3 block(16,16); // //Calculate grid size to cover the whole image // const dim3 grid((input.cols + block.x - 1)/block.x, (input.rows + block.y - 1)/block.y); // //Launch the color conversion kernel // bgr_to_gray_kernel<<<grid,block>>>(d_input,d_input1,d_output,input.cols,input.rows,input.step,output.step); // //Synchronize to check for any kernel launch errors // SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed"); // //Copy back data from destination device meory to OpenCV output image // SAFE_CALL(cudaMemcpy(output.ptr(),d_output,grayBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Host To Device Failed"); // //Free the device memory // SAFE_CALL(cudaFree(d_input),"CUDA Free Failed"); // SAFE_CALL(cudaFree(d_input1),"CUDA Free Failed"); // SAFE_CALL(cudaFree(d_output),"CUDA Free Failed"); // return 0; // }
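The pair above shows the hipify translation at its simplest: the CUDA runtime calls in the .cu file (cudaError, cudaSuccess, cudaGetErrorString, cudaMalloc, cudaMemcpy, cudaMemcpyHostToDevice, cudaDeviceSynchronize, cudaFree) are renamed one-for-one to their hip* counterparts in the .hip file, and "hip/hip_runtime.h" is added at the top. The sketch below condenses that mapping into a standalone HIP program; it is illustrative only and not part of the dataset, and the SAFE_CALL macro body, buffer names, and sizes are placeholders modeled on the pair above.

#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

// Same error-check idiom as the SAFE_CALL in the pair above, already in its
// hipified form: hipError_t, hipSuccess, hipGetErrorString.
#define SAFE_CALL(call, msg)                                               \
  do {                                                                     \
    hipError_t err_ = (call);                                              \
    if (err_ != hipSuccess) {                                              \
      std::fprintf(stderr, "%s: %s\n", (msg), hipGetErrorString(err_));    \
      std::exit(EXIT_FAILURE);                                             \
    }                                                                      \
  } while (0)

int main() {
  const size_t bytes = 256;
  unsigned char host[256] = {0};
  unsigned char* device = nullptr;

  // cudaMalloc / cudaMemcpy in the .cu file map directly to hipMalloc / hipMemcpy.
  SAFE_CALL(hipMalloc<unsigned char>(&device, bytes), "HIP Malloc Failed");
  SAFE_CALL(hipMemcpy(device, host, bytes, hipMemcpyHostToDevice), "HIP Memcpy Host To Device Failed");

  // cudaDeviceSynchronize and cudaFree map to hipDeviceSynchronize and hipFree.
  SAFE_CALL(hipDeviceSynchronize(), "HIP Sync Failed");
  SAFE_CALL(hipFree(device), "HIP Free Failed");
  return 0;
}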
abd70a21dfc86e57cc170e61f4ab108c98e3203c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @generated s Wed Aug 14 12:16:36 2013 @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* ===================================================================== Batches slacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread adds one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. TODO. Block in both directions, for large matrices. E.g., each block does 64x64 tile, instead of 64xN tile. */ __global__ void sgeadd_batched_kernel( int m, int n, float alpha, const float * const *dAarray, int ldda, float **dBarray, int lddb ) { // dA and dB iterate across row i const float *dA = dAarray[ blockIdx.y ]; float *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const float *dAend = dA + n*ldda; while( dA < dAend ) { *dB = alpha*(*dA) + (*dB); dA += ldda; dB += lddb; } } } /* ===================================================================== */ extern "C" void magmablas_sgeadd_batched( magma_int_t m, magma_int_t n, float alpha, const float * const *dAarray, magma_int_t ldda, float **dBarray, magma_int_t lddb, magma_int_t batchCount ) { /* Purpose ======= ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i], for i = 0, ..., batchCount-1. Arguments ========= M (input) INTEGER The number of rows of each matrix dAarray[i]. M >= 0. N (input) INTEGER The number of columns of each matrix dAarray[i]. N >= 0. ALPHA (input) COMPLEX DOUBLE PRECISION The scalar alpha. dAarray (input) array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by n matrices dAarray[i]. LDDA (input) INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). dBarray (input/output) array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX DOUBLE PRECISION array, dimension (LDDB,N) The m by n matrices dBarray[i]. LDDB (input) INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). batchCount (input) INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. ===================================================================== */ magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB, batchCount ); hipLaunchKernelGGL(( sgeadd_batched_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, alpha, dAarray, ldda, dBarray, lddb ); }
abd70a21dfc86e57cc170e61f4ab108c98e3203c.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @generated s Wed Aug 14 12:16:36 2013 @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* ===================================================================== Batches slacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread adds one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. TODO. Block in both directions, for large matrices. E.g., each block does 64x64 tile, instead of 64xN tile. */ __global__ void sgeadd_batched_kernel( int m, int n, float alpha, const float * const *dAarray, int ldda, float **dBarray, int lddb ) { // dA and dB iterate across row i const float *dA = dAarray[ blockIdx.y ]; float *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const float *dAend = dA + n*ldda; while( dA < dAend ) { *dB = alpha*(*dA) + (*dB); dA += ldda; dB += lddb; } } } /* ===================================================================== */ extern "C" void magmablas_sgeadd_batched( magma_int_t m, magma_int_t n, float alpha, const float * const *dAarray, magma_int_t ldda, float **dBarray, magma_int_t lddb, magma_int_t batchCount ) { /* Purpose ======= ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i], for i = 0, ..., batchCount-1. Arguments ========= M (input) INTEGER The number of rows of each matrix dAarray[i]. M >= 0. N (input) INTEGER The number of columns of each matrix dAarray[i]. N >= 0. ALPHA (input) COMPLEX DOUBLE PRECISION The scalar alpha. dAarray (input) array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by n matrices dAarray[i]. LDDA (input) INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). dBarray (input/output) array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX DOUBLE PRECISION array, dimension (LDDB,N) The m by n matrices dBarray[i]. LDDB (input) INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). batchCount (input) INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. ===================================================================== */ magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB, batchCount ); sgeadd_batched_kernel<<< grid, threads, 0, magma_stream >>>( m, n, alpha, dAarray, ldda, dBarray, lddb ); }
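Beyond simple renames, this MAGMA pair shows the one structural change hipify makes: the triple-chevron kernel launch in the .cu file becomes a hipLaunchKernelGGL call in the .hip file, with grid and block dimensions wrapped in dim3 and the shared-memory size and stream passed as ordinary arguments. A minimal sketch of that translation follows; scale_kernel, its sizes, and the null stream are placeholders, not taken from the dataset.

#include <hip/hip_runtime.h>

// Trivial kernel standing in for sgeadd_batched_kernel.
__global__ void scale_kernel(int n, float alpha, float* x) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= alpha;
}

int main() {
  const int n = 1024;
  float* d_x = nullptr;
  hipMalloc(&d_x, n * sizeof(float));

  dim3 threads(64);
  dim3 grid((n + threads.x - 1) / threads.x);

  // CUDA source:   scale_kernel<<< grid, threads, 0, stream >>>(n, 2.0f, d_x);
  // hipify output: shared-memory bytes and stream become ordinary arguments.
  hipLaunchKernelGGL(scale_kernel, grid, threads, 0, 0, n, 2.0f, d_x);

  hipDeviceSynchronize();
  hipFree(d_x);
  return 0;
}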
f7902763c79dd6cd206a77d4a970ad8cd6b3cbf9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* The implementation of this file is based on qkvToContext plugin in TensorRT demo: https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/ Copyright 2019 NVIDIA Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Modifications: scaling is moved from masked softmax to the gemm before that. // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include <hipcub/hipcub.hpp> #include <rocblas.h> #include <hip/hip_fp16.h> #include <math_constants.h> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" #include "attention_impl.h" using namespace onnxruntime::cuda; using namespace cub; namespace onnxruntime { namespace contrib { namespace cuda { static size_t AlignTo(size_t a, size_t b) { return CeilDiv(a, b) * b; } size_t ScratchSize(size_t element_size, int batch_size, int num_heads, int sequence_length, int all_sequence_length) { const size_t len = batch_size * num_heads * sequence_length * all_sequence_length; const size_t bytes = len * element_size; const size_t alignment = 256; const size_t bytesAligned = AlignTo(bytes, alignment); return bytesAligned; } size_t GetAttentionWorkspaceSize( size_t element_size, int batch_size, int num_heads, int head_size, int sequence_length, int past_sequence_length) { size_t qkv_size = 3 * batch_size * sequence_length * num_heads * head_size * element_size; return qkv_size + 2 * ScratchSize(element_size, batch_size, num_heads, sequence_length, past_sequence_length + sequence_length); } template <typename T, unsigned TPB> __device__ inline void Softmax(const int all_sequence_length, const int sequence_length, const int valid_end, const int valid_start, const T* input, T* output) { using BlockReduce = hipcub::BlockReduce<float, TPB>; __shared__ typename BlockReduce::TempStorage tmp_storage; __shared__ float sum_reverse_block; __shared__ float max_block; float thread_data_max(-CUDART_INF_F); // e^x is represented as infinity if x is large enough, like 100.f. // Infinity divided by Infinity is a NAN. Thus, softmax gets a NAN if one or more item are large enough. // a math transform as below is leveraged to get a stable softmax: // e^xi/(e^x1 + ...e^xn) = e^(xi - max) / (e^(x1 - max) + ... 
+ e^(xn - max)) const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * all_sequence_length; for (int i = threadIdx.x; i < valid_end; i += TPB) { if (i >= valid_start) { const int index = offset + i; if (thread_data_max < float(input[index])) { thread_data_max = float(input[index]); } } } const auto max = BlockReduce(tmp_storage).Reduce(thread_data_max, hipcub::Max()); // Store max value if (threadIdx.x == 0) { max_block = max; } __syncthreads(); float thread_data_sum(0.f); for (int i = threadIdx.x; i < valid_end; i += TPB) { if (i >= valid_start) { const int index = offset + i; const float val = input[index]; thread_data_sum += expf(val - max_block); } } const auto sum = BlockReduce(tmp_storage).Reduce(thread_data_sum, hipcub::Sum()); if (threadIdx.x == 0) { sum_reverse_block = 1.f / sum; } __syncthreads(); for (int i = threadIdx.x; i < all_sequence_length; i += TPB) { const int index = offset + i; const float val = (i >= valid_start && i < valid_end) ? expf(float(input[index]) - max_block) * sum_reverse_block : 0.f; output[index] = T(val); } } template <typename T, unsigned TPB> __device__ inline void SoftmaxSmall(const int all_sequence_length, const int sequence_length, const int valid_end, const int valid_start, const T* input, T* output, bool is_unidirectional) { using BlockReduce = hipcub::BlockReduce<float, TPB>; __shared__ typename BlockReduce::TempStorage tmp_storage; __shared__ float sum_reverse_block; __shared__ float max_block; // Input dimension is BxNxSxS*; blockIdx.y is batch index b; gridDim.x=N*S; blockIdx.x is index within N*S; const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * all_sequence_length; const int index = offset + threadIdx.x; bool is_valid = false; // whether it has attention mask == 1. // Update end position for unidirectional. int end = valid_end; if (is_unidirectional) { int end_unid = all_sequence_length - sequence_length + (blockIdx.x % sequence_length) + 1; if (end_unid <= valid_start) { // In this situation, mask of [0, end_unid) and [valid_start, valid_end) has -10000, and [end_unid, valid_start) and [valid_end, all_seq_len) has -20000. // So [0, end_unid) will also have value after softmax. is_valid = threadIdx.x < end_unid; } else { end = min(valid_end, end_unid); } } is_valid = is_valid || (threadIdx.x >= valid_start && threadIdx.x < end); // e^x is represented as infinity if x is large enough, like 100.f. // Infinity divided by Infinity is a NAN. Thus, softmax gets a NAN if one or more item are large enough. // a math transform as below is leveraged to get a stable softmax: // e^xi/(e^x1 + ...e^xn) = e^(xi - max) / (e^(x1 - max) + ... + e^(xn - max)) float thread_data_max = is_valid ? float(input[index]) : float(-CUDART_INF_F); const auto max = BlockReduce(tmp_storage).Reduce(thread_data_max, hipcub::Max(), end); // Store max value if (threadIdx.x == 0) { max_block = max; } __syncthreads(); float thread_data_exp(0.f); if (is_valid) { thread_data_exp = expf(float(input[index]) - max_block); } const auto sum = BlockReduce(tmp_storage).Reduce(thread_data_exp, hipcub::Sum(), end); // Store value of 1.0/sum. if (threadIdx.x == 0) { sum_reverse_block = (1.f) / sum; } __syncthreads(); // threadIdx.x might be larger than all_sequence_length due to alignment to 32x. 
if (threadIdx.x < all_sequence_length) { output[index] = T(thread_data_exp * sum_reverse_block); } } template <typename T, unsigned TPB> __device__ inline void SoftmaxWithMask2DSmall(const int all_sequence_length, const int sequence_length, const int* attention_mask, // 2D attention mask const T* input, T* output, const bool is_unidirectional, const float scalar) { using BlockReduce = hipcub::BlockReduce<float, TPB>; __shared__ typename BlockReduce::TempStorage tmp_storage; __shared__ float sum_reverse_block; __shared__ float max_block; // Input dimension is BxNxSxS*; blockIdx.y is batch index b; gridDim.x=N*S; blockIdx.x is index within N*S; int index = (blockIdx.y * gridDim.x + blockIdx.x) * all_sequence_length + threadIdx.x; float thread_data = -CUDART_INF_F; if (threadIdx.x < all_sequence_length) { const int& mask = attention_mask[blockIdx.y * all_sequence_length + threadIdx.x]; float mask_value = mask > 0 ? 0.0f : -10000.0f; if (is_unidirectional) { int from_index = all_sequence_length - sequence_length + (blockIdx.x % sequence_length); // offset of from token in all sequence length. if (threadIdx.x > from_index) { mask_value += -10000.0f; } } thread_data = float(input[index]) * scalar + mask_value; } const float max = BlockReduce(tmp_storage).Reduce(thread_data, hipcub::Max(), all_sequence_length); // Store max value if (threadIdx.x == 0) { max_block = max; } __syncthreads(); float thread_data_exp = threadIdx.x < all_sequence_length ? expf(thread_data - max_block) : 0.0f; const auto sum = BlockReduce(tmp_storage).Reduce(thread_data_exp, hipcub::Sum(), all_sequence_length); // Store value of 1.0/sum if (threadIdx.x == 0) { sum_reverse_block = (1.f) / sum; } __syncthreads(); if (threadIdx.x < all_sequence_length) { output[index] = T(thread_data_exp * sum_reverse_block); } } template <typename T, unsigned TPB> __global__ void SoftmaxKernelSmall(const int all_sequence_length, const int sequence_length, const T* input, T* output, bool is_unidirectional) { SoftmaxSmall<T, TPB>(all_sequence_length, sequence_length, all_sequence_length, 0, input, output, is_unidirectional); } template <typename T, unsigned TPB> __global__ void SoftmaxKernel(const int all_sequence_length, const int sequence_length, const T* input, T* output) { Softmax<T, TPB>(all_sequence_length, sequence_length, all_sequence_length, 0, input, output); } template <typename T> bool ComputeSoftmax( hipStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int num_heads, const T* input, T* output, bool is_unidirectional) { const dim3 grid(sequence_length * num_heads, batch_size, 1); if (all_sequence_length <= 32) { const int blockSize = 32; hipLaunchKernelGGL(( SoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, input, output, is_unidirectional); } else if (all_sequence_length <= 64) { const int blockSize = 64; hipLaunchKernelGGL(( SoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, input, output, is_unidirectional); } else if (all_sequence_length <= 128) { const int blockSize = 128; hipLaunchKernelGGL(( SoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, input, output, is_unidirectional); } else if (all_sequence_length <= 256) { const int blockSize = 256; hipLaunchKernelGGL(( SoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, input, output, 
is_unidirectional); } else if (all_sequence_length <= 512) { const int blockSize = 512; hipLaunchKernelGGL(( SoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, input, output, is_unidirectional); } else if (all_sequence_length <= 1024) { const int blockSize = 1024; hipLaunchKernelGGL(( SoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, input, output, is_unidirectional); } else if (!is_unidirectional) { const int blockSize = 1024; hipLaunchKernelGGL(( SoftmaxKernel<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, input, output); } else { ORT_THROW("Attention CUDA operator does not support unidirectional with total sequence length > 1024."); } return CUDA_CALL(hipPeekAtLastError()); } template <typename T, unsigned TPB> __global__ void MaskedSoftmaxKernelSmall(const int all_sequence_length, const int sequence_length, const int* mask_end, const int* mask_start, const T* input, T* output, bool is_unidirectional) { __shared__ int start_position; __shared__ int end_position; if (threadIdx.x == 0) { const int batch = blockIdx.y; start_position = mask_start != nullptr ? max(0, mask_start[batch]) : 0; end_position = min(all_sequence_length, mask_end[batch]); // Attend to no word has same effect as attend to all words. This is added to get parity with CPU result. if (start_position >= end_position) { start_position = 0; end_position = all_sequence_length; } } __syncthreads(); SoftmaxSmall<T, TPB>(all_sequence_length, sequence_length, end_position, start_position, input, output, is_unidirectional); } template <typename T, unsigned TPB> __global__ void MaskedSoftmaxKernel(const int all_sequence_length, const int sequence_length, const int* mask_end, const int* mask_start, const T* input, T* output) { __shared__ int start_position; __shared__ int end_position; if (threadIdx.x == 0) { const int batch = blockIdx.y; start_position = mask_start != nullptr ? max(0, mask_start[batch]) : 0; end_position = min(all_sequence_length, mask_end[batch]); // Attend to no word has same effect as attend to all words. This is added to get parity with CPU result. 
if (start_position >= end_position) { start_position = 0; end_position = all_sequence_length; } } __syncthreads(); Softmax<T, TPB>(all_sequence_length, sequence_length, end_position, start_position, input, output); } template <typename T, unsigned TPB> __global__ void SoftmaxWithMask2DSmallKernel(const int all_sequence_length, const int sequence_length, const int* attention_mask, const T* input, T* output, const bool is_unidirectional, const float scalar) { SoftmaxWithMask2DSmall<T, TPB>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar); } template <typename T> bool ComputeSoftmaxWithMask1D(hipStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int num_heads, const int* mask_index, const int* mask_start, const T* input, T* output, const bool is_unidirectional) { const dim3 grid(sequence_length * num_heads, batch_size, 1); if (all_sequence_length <= 32) { const int blockSize = 32; hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional); } else if (all_sequence_length <= 64) { const int blockSize = 64; hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional); } else if (all_sequence_length <= 128) { const int blockSize = 128; hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional); } else if (all_sequence_length <= 256) { const int blockSize = 256; hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional); } else if (all_sequence_length <= 512) { const int blockSize = 512; hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional); } else if (all_sequence_length <= 1024) { const int blockSize = 1024; hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional); } else if (!is_unidirectional) { const int blockSize = 1024; hipLaunchKernelGGL(( MaskedSoftmaxKernel<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, mask_index, mask_start, input, output); } else { ORT_THROW("Attention CUDA operator does not support unidirectional with total sequence length > 1024."); } return CUDA_CALL(hipPeekAtLastError()); } template <typename T> bool ComputeSoftmaxWithMask2D(hipStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int num_heads, const int* attention_mask, const T* input, T* output, const bool is_unidirectional, const float scalar) { const dim3 grid(sequence_length * num_heads, batch_size, 1); if (all_sequence_length <= 32) { const int blockSize = 32; hipLaunchKernelGGL(( SoftmaxWithMask2DSmallKernel<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar); } else if 
(all_sequence_length <= 64) { const int blockSize = 64; hipLaunchKernelGGL(( SoftmaxWithMask2DSmallKernel<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar); } else if (all_sequence_length <= 128) { const int blockSize = 128; hipLaunchKernelGGL(( SoftmaxWithMask2DSmallKernel<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar); } else if (all_sequence_length <= 256) { const int blockSize = 256; hipLaunchKernelGGL(( SoftmaxWithMask2DSmallKernel<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar); } else if (all_sequence_length <= 512) { const int blockSize = 512; hipLaunchKernelGGL(( SoftmaxWithMask2DSmallKernel<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar); } else if (all_sequence_length <= 1024) { const int blockSize = 1024; hipLaunchKernelGGL(( SoftmaxWithMask2DSmallKernel<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar); } else { ORT_THROW("Attention CUDA operator does not supported 2D attention mask with total sequence length > 1024."); } return CUDA_CALL(hipPeekAtLastError()); } template <typename T> __global__ void TransposeCtx(const int H, const T* input, T* output) { // Input: BxNxSxH // Output: BxSxNxH int n = threadIdx.y; int s = blockIdx.x; int b = blockIdx.y; int num_heads = blockDim.y; int sequence_length = gridDim.x; const int NH = num_heads * H; const int NHS = NH * sequence_length; const int in_offset = s * H + n * sequence_length * H + b * NHS; const int out_offset = n * H + s * NH + b * NHS; const int i = threadIdx.x; if (i < H) { output[out_offset + i] = input[in_offset + i]; } } bool LaunchTransCtx(hipStream_t stream, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const float* input, float* output) { const dim3 grid(sequence_length, batch_size, 1); if (0 == (head_size & 1)) { const int H = head_size / 2; const float2* input2 = reinterpret_cast<const float2*>(input); float2* output2 = reinterpret_cast<float2*>(output); const dim3 block(H, num_heads, 1); hipLaunchKernelGGL(( TransposeCtx<float2>), dim3(grid), dim3(block), 0, stream, H, input2, output2); } else { const dim3 block(head_size, num_heads, 1); hipLaunchKernelGGL(( TransposeCtx<float>), dim3(grid), dim3(block), 0, stream, head_size, input, output); } return CUDA_CALL(hipPeekAtLastError()); } bool LaunchTransCtx(hipStream_t stream, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const half* input, half* output) { const dim3 grid(sequence_length, batch_size, 1); if (0 == (head_size % 4)) { const int H = head_size / 4; const dim3 block(H, num_heads, 1); const float2* input2 = reinterpret_cast<const float2*>(input); float2* output2 = reinterpret_cast<float2*>(output); hipLaunchKernelGGL(( TransposeCtx<float2>), dim3(grid), dim3(block), 0, stream, H, input2, output2); } else if (0 == (head_size & 1)) { const int H = head_size / 2; const dim3 block(H, num_heads, 1); const half2* input2 = reinterpret_cast<const half2*>(input); half2* output2 = reinterpret_cast<half2*>(output); hipLaunchKernelGGL(( TransposeCtx<half2>), 
dim3(grid), dim3(block), 0, stream, H, input2, output2); } else { // this should be an "odd" case. probably not worth catching it in the half2 kernel. const dim3 block(head_size, num_heads, 1); hipLaunchKernelGGL(( TransposeCtx<half>), dim3(grid), dim3(block), 0, stream, head_size, input, output); } return CUDA_CALL(hipPeekAtLastError()); } template <typename T> __global__ void TransposeQKV(const int H, const T* input, T* output) { // Input: BxSx3xNxH // Output: 3xBxNxSxH int n = threadIdx.y; int s = blockIdx.x; int b = blockIdx.y; int m = blockIdx.z; // matrix id const int num_heads = blockDim.y; const int sequence_length = gridDim.x; const int batch_size = gridDim.y; const int NH = num_heads * H; const int NHS = NH * sequence_length; const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3; const int out_offset = s * H + n * sequence_length * H + b * NHS + m * NHS * batch_size; const int i = threadIdx.x; if (i < H) { output[out_offset + i] = input[in_offset + i]; } } bool LaunchTransQkv(hipStream_t stream, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const float* input, float* output) { const dim3 grid(sequence_length, batch_size, 3); if (0 == (head_size & 1)) { const int H = head_size / 2; const float2* input2 = reinterpret_cast<const float2*>(input); float2* output2 = reinterpret_cast<float2*>(output); const dim3 block(H, num_heads, 1); hipLaunchKernelGGL(( TransposeQKV<float2>), dim3(grid), dim3(block), 0, stream, H, input2, output2); } else { const dim3 block(head_size, num_heads, 1); hipLaunchKernelGGL(( TransposeQKV<float>), dim3(grid), dim3(block), 0, stream, head_size, input, output); } return CUDA_CALL(hipPeekAtLastError()); } bool LaunchTransQkv(hipStream_t stream, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const half* input, half* output) { const dim3 grid(sequence_length, batch_size, 3); if (0 == (head_size % 4)) { const int H = head_size / 4; const dim3 block(H, num_heads, 1); const float2* input2 = reinterpret_cast<const float2*>(input); float2* output2 = reinterpret_cast<float2*>(output); hipLaunchKernelGGL(( TransposeQKV<float2>), dim3(grid), dim3(block), 0, stream, H, input2, output2); } else if (0 == (head_size & 1)) { const int H = head_size / 2; const dim3 block(H, num_heads, 1); const half2* input2 = reinterpret_cast<const half2*>(input); half2* output2 = reinterpret_cast<half2*>(output); hipLaunchKernelGGL(( TransposeQKV<half2>), dim3(grid), dim3(block), 0, stream, H, input2, output2); } else { // this should be an "odd" case. probably not worth catching it in the half2 kernel.. 
const dim3 block(head_size, num_heads, 1); hipLaunchKernelGGL(( TransposeQKV<half>), dim3(grid), dim3(block), 0, stream, head_size, input, output); } return CUDA_CALL(hipPeekAtLastError()); } template <typename T> __global__ void ConcatPastToPresent(const int sequence_length, const T* past, const T* k_v, T* present) { const int h = threadIdx.x; const int n = threadIdx.y; const int s = blockIdx.x; const int b = blockIdx.y; const int is_v = blockIdx.z; // 0 for k, 1 for v const int all_sequence_length = gridDim.x; const int batch_size = gridDim.y; const int num_heads = blockDim.y; const int H = blockDim.x; // past: 2 x BxNxS'xH (past_k and past_v) // k_v: 2 x BxNxSxH (k and v) // present: 2 x BxNxS*xH (present_k and present_v) const int past_sequence_length = all_sequence_length - sequence_length; const int present_SH = all_sequence_length * H; const int present_NSH = num_heads * present_SH; int out_offset = b * present_NSH + n * present_SH + s * H + h + is_v * (present_NSH * batch_size); if (s < past_sequence_length) { const int past_SH = past_sequence_length * H; const int past_NSH = num_heads * past_SH; const int in_offset = b * past_NSH + n * past_SH + s * H + h + is_v * (past_NSH * batch_size); present[out_offset] = past[in_offset]; } else if (s < all_sequence_length) { const int SH = sequence_length * H; const int NSH = num_heads * SH; const int in_offset = b * NSH + n * SH + (s - past_sequence_length) * H + h + is_v * (NSH * batch_size); present[out_offset] = k_v[in_offset]; } } bool LaunchConcatPastToPresent(hipStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const float* past, const float* k_v, float* present) { const dim3 grid(all_sequence_length, batch_size, 2); if (0 == (head_size & 1)) { const dim3 block(head_size / 2, num_heads, 1); hipLaunchKernelGGL(( ConcatPastToPresent<float2>), dim3(grid), dim3(block), 0, stream, sequence_length, reinterpret_cast<const float2*>(past), reinterpret_cast<const float2*>(k_v), reinterpret_cast<float2*>(present)); } else { const dim3 block(head_size, num_heads, 1); hipLaunchKernelGGL(( ConcatPastToPresent<float>), dim3(grid), dim3(block), 0, stream, sequence_length, past, k_v, present); } return CUDA_CALL(hipPeekAtLastError()); } bool LaunchConcatPastToPresent(hipStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const half* past, const half* k_v, half* present) { const dim3 grid(all_sequence_length, batch_size, 2); if (0 == (head_size % 4)) { const dim3 block(head_size / 4, num_heads, 1); hipLaunchKernelGGL(( ConcatPastToPresent<float2>), dim3(grid), dim3(block), 0, stream, sequence_length, reinterpret_cast<const float2*>(past), reinterpret_cast<const float2*>(k_v), reinterpret_cast<float2*>(present)); } else if (0 == (head_size & 1)) { const dim3 block(head_size / 2, num_heads, 1); hipLaunchKernelGGL(( ConcatPastToPresent<half2>), dim3(grid), dim3(block), 0, stream, sequence_length, reinterpret_cast<const half2*>(past), reinterpret_cast<const half2*>(k_v), reinterpret_cast<half2*>(present)); } else { // this should be an "odd" case. probably not worth catching it in the half2 kernel. 
const dim3 block(head_size, num_heads, 1); hipLaunchKernelGGL(( ConcatPastToPresent<half>), dim3(grid), dim3(block), 0, stream, sequence_length, past, k_v, present); } return CUDA_CALL(hipPeekAtLastError()); } hipblasStatus_t inline CublasGemmStridedBatched( hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, const float alpha, const float* A, int lda, long long int strideA, const float* B, int ldb, long long int strideB, const float beta, float* C, int ldc, long long int strideC, int batchCount) { return hipblasSgemmStridedBatched( handle, transa, transb, m, n, k, &alpha, A, lda, strideA, B, ldb, strideB, &beta, C, ldc, strideC, batchCount); } hipblasStatus_t inline CublasGemmStridedBatched( hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, const half alpha, const half* A, int lda, long long int strideA, const half* B, int ldb, long long int strideB, const half beta, half* C, int ldc, long long int strideC, int batchCount) { return hipblasHgemmStridedBatched( handle, transa, transb, m, n, k, &alpha, A, lda, strideA, B, ldb, strideB, &beta, C, ldc, strideC, batchCount); } template <typename T> bool QkvToContext( hipblasHandle_t& cublas, hipStream_t stream, const int batch_size, const int sequence_length, const int num_heads, const int head_size, const size_t element_size, const T* input, T* output, T* workspace, const int* mask_index, const std::vector<int64_t>* mask_index_dims, bool is_unidirectional, int past_sequence_length, const T* past, T* present) { const int all_sequence_length = past_sequence_length + sequence_length; const size_t bytes = ScratchSize(element_size, batch_size, num_heads, sequence_length, all_sequence_length); T* scratch1 = workspace; T* scratch2 = scratch1 + (bytes / element_size); T* scratch3 = scratch2 + (bytes / element_size); // input should be BxSx3xNxH => scratch3: 3xBxNxSxH if (!LaunchTransQkv(stream, sequence_length, batch_size, head_size, num_heads, input, scratch3)) { return false; } // now scratch3 has Q, K, V: each has size BxNxSxH const int batches = batch_size * num_heads; const int size_per_batch = sequence_length * head_size; const int total_size = batches * size_per_batch; const T* q = scratch3; const T* k = q + total_size; const T* v = k + total_size; hipblasSetStream(cublas, stream); CublasMathModeSetter helper(cublas, CUBLAS_TENSOR_OP_MATH); // Concat past (2xBxNxS'xH) to present (2xBxNxS*xH): // past_k (BxNxS'xH) + k (BxNxSxH) => present_k (BxNxS*xH) // past_v (BxNxS'xH) + v (BxNxSxH) => present_v (BxNxS*xH) const int present_size_per_batch = all_sequence_length * head_size; if (nullptr != present) { if (!LaunchConcatPastToPresent(stream, all_sequence_length, sequence_length, batch_size, head_size, num_heads, past, k, present)) { return false; } // update pointers to present_k and present_v. k = present; v = present + batches * present_size_per_batch; } bool use_2d_attention_mask = (nullptr != mask_index && nullptr != mask_index_dims && mask_index_dims->size() == 2); // compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxS* // Q: BxNxSxH, K (present_k): BxNxS*xH, Q*K': BxNxSxS* const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size)); const int temp_matrix_size = sequence_length * all_sequence_length; T alpha = (T)(use_2d_attention_mask ? 
1.0f : rsqrt_head_size); if (!CUBLAS_CALL(CublasGemmStridedBatched( cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, all_sequence_length, sequence_length, head_size, alpha, k, head_size, present_size_per_batch, q, head_size, size_per_batch, 0.f, scratch1, all_sequence_length, temp_matrix_size, batches))) { return false; } // apply softmax and store result P to scratch2: BxNxSxS* if (use_2d_attention_mask) { // 2d attention mask if (!ComputeSoftmaxWithMask2D<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, scratch1, scratch2, is_unidirectional, rsqrt_head_size)) { return false; } } else if (nullptr != mask_index) { // 1d mask index ORT_ENFORCE(nullptr != mask_index_dims && mask_index_dims->size() == 1); // mask_index has 1D shape: either (batch_size) or (2*batch_size). Only the later one has start postions. const int* mask_start = (mask_index_dims->at(0) > batch_size) ? mask_index + batch_size : nullptr; if (!ComputeSoftmaxWithMask1D<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, mask_start, scratch1, scratch2, is_unidirectional)) { return false; } } else { // no mask if (!ComputeSoftmax<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, scratch1, scratch2, is_unidirectional)) { return false; } } // compute P*V (as V*P), and store in scratch3: BxNxSxH if (!CUBLAS_CALL(CublasGemmStridedBatched( cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, head_size, sequence_length, all_sequence_length, 1.f, v, head_size, present_size_per_batch, scratch2, all_sequence_length, temp_matrix_size, 0.f, scratch3, head_size, size_per_batch, batches))) { return false; } // scratch3 is BxNxSxH, transpose to output BxSxNxH return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads, scratch3, output); } bool LaunchAttentionKernel( const void* input, const int* mask_index, const std::vector<int64_t>* mask_index_dims, void* output, const int batch_size, const int sequence_length, const int num_heads, const int head_size, void* workspace, hipblasHandle_t& cublas, const size_t element_size, bool is_unidirectional, int past_sequence_length, const void* past, void* present) { // use default stream const hipStream_t stream = nullptr; if (element_size == 2) { return QkvToContext(cublas, stream, batch_size, sequence_length, num_heads, head_size, element_size, reinterpret_cast<const half*>(input), reinterpret_cast<half*>(output), reinterpret_cast<half*>(workspace), mask_index, mask_index_dims, is_unidirectional, past_sequence_length, reinterpret_cast<const half*>(past), reinterpret_cast<half*>(present)); } else { return QkvToContext(cublas, stream, batch_size, sequence_length, num_heads, head_size, element_size, reinterpret_cast<const float*>(input), reinterpret_cast<float*>(output), reinterpret_cast<float*>(workspace), mask_index, mask_index_dims, is_unidirectional, past_sequence_length, reinterpret_cast<const float*>(past), reinterpret_cast<float*>(present)); } } } // namespace cuda } // namespace contrib } // namespace onnxruntime
f7902763c79dd6cd206a77d4a970ad8cd6b3cbf9.cu
/* The implementation of this file is based on qkvToContext plugin in TensorRT demo: https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/ Copyright 2019 NVIDIA Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Modifications: scaling is moved from masked softmax to the gemm before that. // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include <cub/cub.cuh> #include <cublas_v2.h> #include <cuda_fp16.h> #include <math_constants.h> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" #include "attention_impl.h" using namespace onnxruntime::cuda; using namespace cub; namespace onnxruntime { namespace contrib { namespace cuda { static size_t AlignTo(size_t a, size_t b) { return CeilDiv(a, b) * b; } size_t ScratchSize(size_t element_size, int batch_size, int num_heads, int sequence_length, int all_sequence_length) { const size_t len = batch_size * num_heads * sequence_length * all_sequence_length; const size_t bytes = len * element_size; const size_t alignment = 256; const size_t bytesAligned = AlignTo(bytes, alignment); return bytesAligned; } size_t GetAttentionWorkspaceSize( size_t element_size, int batch_size, int num_heads, int head_size, int sequence_length, int past_sequence_length) { size_t qkv_size = 3 * batch_size * sequence_length * num_heads * head_size * element_size; return qkv_size + 2 * ScratchSize(element_size, batch_size, num_heads, sequence_length, past_sequence_length + sequence_length); } template <typename T, unsigned TPB> __device__ inline void Softmax(const int all_sequence_length, const int sequence_length, const int valid_end, const int valid_start, const T* input, T* output) { using BlockReduce = cub::BlockReduce<float, TPB>; __shared__ typename BlockReduce::TempStorage tmp_storage; __shared__ float sum_reverse_block; __shared__ float max_block; float thread_data_max(-CUDART_INF_F); // e^x is represented as infinity if x is large enough, like 100.f. // Infinity divided by Infinity is a NAN. Thus, softmax gets a NAN if one or more item are large enough. // a math transform as below is leveraged to get a stable softmax: // e^xi/(e^x1 + ...e^xn) = e^(xi - max) / (e^(x1 - max) + ... 
+ e^(xn - max)) const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * all_sequence_length; for (int i = threadIdx.x; i < valid_end; i += TPB) { if (i >= valid_start) { const int index = offset + i; if (thread_data_max < float(input[index])) { thread_data_max = float(input[index]); } } } const auto max = BlockReduce(tmp_storage).Reduce(thread_data_max, cub::Max()); // Store max value if (threadIdx.x == 0) { max_block = max; } __syncthreads(); float thread_data_sum(0.f); for (int i = threadIdx.x; i < valid_end; i += TPB) { if (i >= valid_start) { const int index = offset + i; const float val = input[index]; thread_data_sum += expf(val - max_block); } } const auto sum = BlockReduce(tmp_storage).Reduce(thread_data_sum, cub::Sum()); if (threadIdx.x == 0) { sum_reverse_block = 1.f / sum; } __syncthreads(); for (int i = threadIdx.x; i < all_sequence_length; i += TPB) { const int index = offset + i; const float val = (i >= valid_start && i < valid_end) ? expf(float(input[index]) - max_block) * sum_reverse_block : 0.f; output[index] = T(val); } } template <typename T, unsigned TPB> __device__ inline void SoftmaxSmall(const int all_sequence_length, const int sequence_length, const int valid_end, const int valid_start, const T* input, T* output, bool is_unidirectional) { using BlockReduce = cub::BlockReduce<float, TPB>; __shared__ typename BlockReduce::TempStorage tmp_storage; __shared__ float sum_reverse_block; __shared__ float max_block; // Input dimension is BxNxSxS*; blockIdx.y is batch index b; gridDim.x=N*S; blockIdx.x is index within N*S; const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * all_sequence_length; const int index = offset + threadIdx.x; bool is_valid = false; // whether it has attention mask == 1. // Update end position for unidirectional. int end = valid_end; if (is_unidirectional) { int end_unid = all_sequence_length - sequence_length + (blockIdx.x % sequence_length) + 1; if (end_unid <= valid_start) { // In this situation, mask of [0, end_unid) and [valid_start, valid_end) has -10000, and [end_unid, valid_start) and [valid_end, all_seq_len) has -20000. // So [0, end_unid) will also have value after softmax. is_valid = threadIdx.x < end_unid; } else { end = min(valid_end, end_unid); } } is_valid = is_valid || (threadIdx.x >= valid_start && threadIdx.x < end); // e^x is represented as infinity if x is large enough, like 100.f. // Infinity divided by Infinity is a NAN. Thus, softmax gets a NAN if one or more item are large enough. // a math transform as below is leveraged to get a stable softmax: // e^xi/(e^x1 + ...e^xn) = e^(xi - max) / (e^(x1 - max) + ... + e^(xn - max)) float thread_data_max = is_valid ? float(input[index]) : float(-CUDART_INF_F); const auto max = BlockReduce(tmp_storage).Reduce(thread_data_max, cub::Max(), end); // Store max value if (threadIdx.x == 0) { max_block = max; } __syncthreads(); float thread_data_exp(0.f); if (is_valid) { thread_data_exp = expf(float(input[index]) - max_block); } const auto sum = BlockReduce(tmp_storage).Reduce(thread_data_exp, cub::Sum(), end); // Store value of 1.0/sum. if (threadIdx.x == 0) { sum_reverse_block = (1.f) / sum; } __syncthreads(); // threadIdx.x might be larger than all_sequence_length due to alignment to 32x. 
if (threadIdx.x < all_sequence_length) { output[index] = T(thread_data_exp * sum_reverse_block); } } template <typename T, unsigned TPB> __device__ inline void SoftmaxWithMask2DSmall(const int all_sequence_length, const int sequence_length, const int* attention_mask, // 2D attention mask const T* input, T* output, const bool is_unidirectional, const float scalar) { using BlockReduce = cub::BlockReduce<float, TPB>; __shared__ typename BlockReduce::TempStorage tmp_storage; __shared__ float sum_reverse_block; __shared__ float max_block; // Input dimension is BxNxSxS*; blockIdx.y is batch index b; gridDim.x=N*S; blockIdx.x is index within N*S; int index = (blockIdx.y * gridDim.x + blockIdx.x) * all_sequence_length + threadIdx.x; float thread_data = -CUDART_INF_F; if (threadIdx.x < all_sequence_length) { const int& mask = attention_mask[blockIdx.y * all_sequence_length + threadIdx.x]; float mask_value = mask > 0 ? 0.0f : -10000.0f; if (is_unidirectional) { int from_index = all_sequence_length - sequence_length + (blockIdx.x % sequence_length); // offset of from token in all sequence length. if (threadIdx.x > from_index) { mask_value += -10000.0f; } } thread_data = float(input[index]) * scalar + mask_value; } const float max = BlockReduce(tmp_storage).Reduce(thread_data, cub::Max(), all_sequence_length); // Store max value if (threadIdx.x == 0) { max_block = max; } __syncthreads(); float thread_data_exp = threadIdx.x < all_sequence_length ? expf(thread_data - max_block) : 0.0f; const auto sum = BlockReduce(tmp_storage).Reduce(thread_data_exp, cub::Sum(), all_sequence_length); // Store value of 1.0/sum if (threadIdx.x == 0) { sum_reverse_block = (1.f) / sum; } __syncthreads(); if (threadIdx.x < all_sequence_length) { output[index] = T(thread_data_exp * sum_reverse_block); } } template <typename T, unsigned TPB> __global__ void SoftmaxKernelSmall(const int all_sequence_length, const int sequence_length, const T* input, T* output, bool is_unidirectional) { SoftmaxSmall<T, TPB>(all_sequence_length, sequence_length, all_sequence_length, 0, input, output, is_unidirectional); } template <typename T, unsigned TPB> __global__ void SoftmaxKernel(const int all_sequence_length, const int sequence_length, const T* input, T* output) { Softmax<T, TPB>(all_sequence_length, sequence_length, all_sequence_length, 0, input, output); } template <typename T> bool ComputeSoftmax( cudaStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int num_heads, const T* input, T* output, bool is_unidirectional) { const dim3 grid(sequence_length * num_heads, batch_size, 1); if (all_sequence_length <= 32) { const int blockSize = 32; SoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, input, output, is_unidirectional); } else if (all_sequence_length <= 64) { const int blockSize = 64; SoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, input, output, is_unidirectional); } else if (all_sequence_length <= 128) { const int blockSize = 128; SoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, input, output, is_unidirectional); } else if (all_sequence_length <= 256) { const int blockSize = 256; SoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, input, output, is_unidirectional); } else if (all_sequence_length <= 512) { const int blockSize = 512; SoftmaxKernelSmall<T, blockSize><<<grid, 
blockSize, 0, stream>>>(all_sequence_length, sequence_length, input, output, is_unidirectional); } else if (all_sequence_length <= 1024) { const int blockSize = 1024; SoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, input, output, is_unidirectional); } else if (!is_unidirectional) { const int blockSize = 1024; SoftmaxKernel<T, blockSize><<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, input, output); } else { ORT_THROW("Attention CUDA operator does not support unidirectional with total sequence length > 1024."); } return CUDA_CALL(cudaPeekAtLastError()); } template <typename T, unsigned TPB> __global__ void MaskedSoftmaxKernelSmall(const int all_sequence_length, const int sequence_length, const int* mask_end, const int* mask_start, const T* input, T* output, bool is_unidirectional) { __shared__ int start_position; __shared__ int end_position; if (threadIdx.x == 0) { const int batch = blockIdx.y; start_position = mask_start != nullptr ? max(0, mask_start[batch]) : 0; end_position = min(all_sequence_length, mask_end[batch]); // Attend to no word has same effect as attend to all words. This is added to get parity with CPU result. if (start_position >= end_position) { start_position = 0; end_position = all_sequence_length; } } __syncthreads(); SoftmaxSmall<T, TPB>(all_sequence_length, sequence_length, end_position, start_position, input, output, is_unidirectional); } template <typename T, unsigned TPB> __global__ void MaskedSoftmaxKernel(const int all_sequence_length, const int sequence_length, const int* mask_end, const int* mask_start, const T* input, T* output) { __shared__ int start_position; __shared__ int end_position; if (threadIdx.x == 0) { const int batch = blockIdx.y; start_position = mask_start != nullptr ? max(0, mask_start[batch]) : 0; end_position = min(all_sequence_length, mask_end[batch]); // Attend to no word has same effect as attend to all words. This is added to get parity with CPU result. 
if (start_position >= end_position) { start_position = 0; end_position = all_sequence_length; } } __syncthreads(); Softmax<T, TPB>(all_sequence_length, sequence_length, end_position, start_position, input, output); } template <typename T, unsigned TPB> __global__ void SoftmaxWithMask2DSmallKernel(const int all_sequence_length, const int sequence_length, const int* attention_mask, const T* input, T* output, const bool is_unidirectional, const float scalar) { SoftmaxWithMask2DSmall<T, TPB>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar); } template <typename T> bool ComputeSoftmaxWithMask1D(cudaStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int num_heads, const int* mask_index, const int* mask_start, const T* input, T* output, const bool is_unidirectional) { const dim3 grid(sequence_length * num_heads, batch_size, 1); if (all_sequence_length <= 32) { const int blockSize = 32; MaskedSoftmaxKernelSmall<T, blockSize> <<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional); } else if (all_sequence_length <= 64) { const int blockSize = 64; MaskedSoftmaxKernelSmall<T, blockSize> <<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional); } else if (all_sequence_length <= 128) { const int blockSize = 128; MaskedSoftmaxKernelSmall<T, blockSize> <<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional); } else if (all_sequence_length <= 256) { const int blockSize = 256; MaskedSoftmaxKernelSmall<T, blockSize> <<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional); } else if (all_sequence_length <= 512) { const int blockSize = 512; MaskedSoftmaxKernelSmall<T, blockSize> <<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional); } else if (all_sequence_length <= 1024) { const int blockSize = 1024; MaskedSoftmaxKernelSmall<T, blockSize> <<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional); } else if (!is_unidirectional) { const int blockSize = 1024; MaskedSoftmaxKernel<T, blockSize> <<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, mask_index, mask_start, input, output); } else { ORT_THROW("Attention CUDA operator does not support unidirectional with total sequence length > 1024."); } return CUDA_CALL(cudaPeekAtLastError()); } template <typename T> bool ComputeSoftmaxWithMask2D(cudaStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int num_heads, const int* attention_mask, const T* input, T* output, const bool is_unidirectional, const float scalar) { const dim3 grid(sequence_length * num_heads, batch_size, 1); if (all_sequence_length <= 32) { const int blockSize = 32; SoftmaxWithMask2DSmallKernel<T, blockSize> <<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar); } else if (all_sequence_length <= 64) { const int blockSize = 64; SoftmaxWithMask2DSmallKernel<T, blockSize> <<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar); } else if (all_sequence_length 
<= 128) { const int blockSize = 128; SoftmaxWithMask2DSmallKernel<T, blockSize> <<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar); } else if (all_sequence_length <= 256) { const int blockSize = 256; SoftmaxWithMask2DSmallKernel<T, blockSize> <<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar); } else if (all_sequence_length <= 512) { const int blockSize = 512; SoftmaxWithMask2DSmallKernel<T, blockSize> <<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar); } else if (all_sequence_length <= 1024) { const int blockSize = 1024; SoftmaxWithMask2DSmallKernel<T, blockSize> <<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar); } else { ORT_THROW("Attention CUDA operator does not supported 2D attention mask with total sequence length > 1024."); } return CUDA_CALL(cudaPeekAtLastError()); } template <typename T> __global__ void TransposeCtx(const int H, const T* input, T* output) { // Input: BxNxSxH // Output: BxSxNxH int n = threadIdx.y; int s = blockIdx.x; int b = blockIdx.y; int num_heads = blockDim.y; int sequence_length = gridDim.x; const int NH = num_heads * H; const int NHS = NH * sequence_length; const int in_offset = s * H + n * sequence_length * H + b * NHS; const int out_offset = n * H + s * NH + b * NHS; const int i = threadIdx.x; if (i < H) { output[out_offset + i] = input[in_offset + i]; } } bool LaunchTransCtx(cudaStream_t stream, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const float* input, float* output) { const dim3 grid(sequence_length, batch_size, 1); if (0 == (head_size & 1)) { const int H = head_size / 2; const float2* input2 = reinterpret_cast<const float2*>(input); float2* output2 = reinterpret_cast<float2*>(output); const dim3 block(H, num_heads, 1); TransposeCtx<float2><<<grid, block, 0, stream>>>(H, input2, output2); } else { const dim3 block(head_size, num_heads, 1); TransposeCtx<float><<<grid, block, 0, stream>>>(head_size, input, output); } return CUDA_CALL(cudaPeekAtLastError()); } bool LaunchTransCtx(cudaStream_t stream, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const half* input, half* output) { const dim3 grid(sequence_length, batch_size, 1); if (0 == (head_size % 4)) { const int H = head_size / 4; const dim3 block(H, num_heads, 1); const float2* input2 = reinterpret_cast<const float2*>(input); float2* output2 = reinterpret_cast<float2*>(output); TransposeCtx<float2><<<grid, block, 0, stream>>>(H, input2, output2); } else if (0 == (head_size & 1)) { const int H = head_size / 2; const dim3 block(H, num_heads, 1); const half2* input2 = reinterpret_cast<const half2*>(input); half2* output2 = reinterpret_cast<half2*>(output); TransposeCtx<half2><<<grid, block, 0, stream>>>(H, input2, output2); } else { // this should be an "odd" case. probably not worth catching it in the half2 kernel. 
const dim3 block(head_size, num_heads, 1); TransposeCtx<half><<<grid, block, 0, stream>>>(head_size, input, output); } return CUDA_CALL(cudaPeekAtLastError()); } template <typename T> __global__ void TransposeQKV(const int H, const T* input, T* output) { // Input: BxSx3xNxH // Output: 3xBxNxSxH int n = threadIdx.y; int s = blockIdx.x; int b = blockIdx.y; int m = blockIdx.z; // matrix id const int num_heads = blockDim.y; const int sequence_length = gridDim.x; const int batch_size = gridDim.y; const int NH = num_heads * H; const int NHS = NH * sequence_length; const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3; const int out_offset = s * H + n * sequence_length * H + b * NHS + m * NHS * batch_size; const int i = threadIdx.x; if (i < H) { output[out_offset + i] = input[in_offset + i]; } } bool LaunchTransQkv(cudaStream_t stream, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const float* input, float* output) { const dim3 grid(sequence_length, batch_size, 3); if (0 == (head_size & 1)) { const int H = head_size / 2; const float2* input2 = reinterpret_cast<const float2*>(input); float2* output2 = reinterpret_cast<float2*>(output); const dim3 block(H, num_heads, 1); TransposeQKV<float2><<<grid, block, 0, stream>>>(H, input2, output2); } else { const dim3 block(head_size, num_heads, 1); TransposeQKV<float><<<grid, block, 0, stream>>>(head_size, input, output); } return CUDA_CALL(cudaPeekAtLastError()); } bool LaunchTransQkv(cudaStream_t stream, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const half* input, half* output) { const dim3 grid(sequence_length, batch_size, 3); if (0 == (head_size % 4)) { const int H = head_size / 4; const dim3 block(H, num_heads, 1); const float2* input2 = reinterpret_cast<const float2*>(input); float2* output2 = reinterpret_cast<float2*>(output); TransposeQKV<float2><<<grid, block, 0, stream>>>(H, input2, output2); } else if (0 == (head_size & 1)) { const int H = head_size / 2; const dim3 block(H, num_heads, 1); const half2* input2 = reinterpret_cast<const half2*>(input); half2* output2 = reinterpret_cast<half2*>(output); TransposeQKV<half2><<<grid, block, 0, stream>>>(H, input2, output2); } else { // this should be an "odd" case. probably not worth catching it in the half2 kernel.. 
const dim3 block(head_size, num_heads, 1); TransposeQKV<half><<<grid, block, 0, stream>>>(head_size, input, output); } return CUDA_CALL(cudaPeekAtLastError()); } template <typename T> __global__ void ConcatPastToPresent(const int sequence_length, const T* past, const T* k_v, T* present) { const int h = threadIdx.x; const int n = threadIdx.y; const int s = blockIdx.x; const int b = blockIdx.y; const int is_v = blockIdx.z; // 0 for k, 1 for v const int all_sequence_length = gridDim.x; const int batch_size = gridDim.y; const int num_heads = blockDim.y; const int H = blockDim.x; // past: 2 x BxNxS'xH (past_k and past_v) // k_v: 2 x BxNxSxH (k and v) // present: 2 x BxNxS*xH (present_k and present_v) const int past_sequence_length = all_sequence_length - sequence_length; const int present_SH = all_sequence_length * H; const int present_NSH = num_heads * present_SH; int out_offset = b * present_NSH + n * present_SH + s * H + h + is_v * (present_NSH * batch_size); if (s < past_sequence_length) { const int past_SH = past_sequence_length * H; const int past_NSH = num_heads * past_SH; const int in_offset = b * past_NSH + n * past_SH + s * H + h + is_v * (past_NSH * batch_size); present[out_offset] = past[in_offset]; } else if (s < all_sequence_length) { const int SH = sequence_length * H; const int NSH = num_heads * SH; const int in_offset = b * NSH + n * SH + (s - past_sequence_length) * H + h + is_v * (NSH * batch_size); present[out_offset] = k_v[in_offset]; } } bool LaunchConcatPastToPresent(cudaStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const float* past, const float* k_v, float* present) { const dim3 grid(all_sequence_length, batch_size, 2); if (0 == (head_size & 1)) { const dim3 block(head_size / 2, num_heads, 1); ConcatPastToPresent<float2><<<grid, block, 0, stream>>>(sequence_length, reinterpret_cast<const float2*>(past), reinterpret_cast<const float2*>(k_v), reinterpret_cast<float2*>(present)); } else { const dim3 block(head_size, num_heads, 1); ConcatPastToPresent<float><<<grid, block, 0, stream>>>(sequence_length, past, k_v, present); } return CUDA_CALL(cudaPeekAtLastError()); } bool LaunchConcatPastToPresent(cudaStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const half* past, const half* k_v, half* present) { const dim3 grid(all_sequence_length, batch_size, 2); if (0 == (head_size % 4)) { const dim3 block(head_size / 4, num_heads, 1); ConcatPastToPresent<float2><<<grid, block, 0, stream>>>(sequence_length, reinterpret_cast<const float2*>(past), reinterpret_cast<const float2*>(k_v), reinterpret_cast<float2*>(present)); } else if (0 == (head_size & 1)) { const dim3 block(head_size / 2, num_heads, 1); ConcatPastToPresent<half2><<<grid, block, 0, stream>>>(sequence_length, reinterpret_cast<const half2*>(past), reinterpret_cast<const half2*>(k_v), reinterpret_cast<half2*>(present)); } else { // this should be an "odd" case. probably not worth catching it in the half2 kernel. 
const dim3 block(head_size, num_heads, 1); ConcatPastToPresent<half><<<grid, block, 0, stream>>>(sequence_length, past, k_v, present); } return CUDA_CALL(cudaPeekAtLastError()); } cublasStatus_t inline CublasGemmStridedBatched( cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const float alpha, const float* A, int lda, long long int strideA, const float* B, int ldb, long long int strideB, const float beta, float* C, int ldc, long long int strideC, int batchCount) { return cublasSgemmStridedBatched( handle, transa, transb, m, n, k, &alpha, A, lda, strideA, B, ldb, strideB, &beta, C, ldc, strideC, batchCount); } cublasStatus_t inline CublasGemmStridedBatched( cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const half alpha, const half* A, int lda, long long int strideA, const half* B, int ldb, long long int strideB, const half beta, half* C, int ldc, long long int strideC, int batchCount) { return cublasHgemmStridedBatched( handle, transa, transb, m, n, k, &alpha, A, lda, strideA, B, ldb, strideB, &beta, C, ldc, strideC, batchCount); } template <typename T> bool QkvToContext( cublasHandle_t& cublas, cudaStream_t stream, const int batch_size, const int sequence_length, const int num_heads, const int head_size, const size_t element_size, const T* input, T* output, T* workspace, const int* mask_index, const std::vector<int64_t>* mask_index_dims, bool is_unidirectional, int past_sequence_length, const T* past, T* present) { const int all_sequence_length = past_sequence_length + sequence_length; const size_t bytes = ScratchSize(element_size, batch_size, num_heads, sequence_length, all_sequence_length); T* scratch1 = workspace; T* scratch2 = scratch1 + (bytes / element_size); T* scratch3 = scratch2 + (bytes / element_size); // input should be BxSx3xNxH => scratch3: 3xBxNxSxH if (!LaunchTransQkv(stream, sequence_length, batch_size, head_size, num_heads, input, scratch3)) { return false; } // now scratch3 has Q, K, V: each has size BxNxSxH const int batches = batch_size * num_heads; const int size_per_batch = sequence_length * head_size; const int total_size = batches * size_per_batch; const T* q = scratch3; const T* k = q + total_size; const T* v = k + total_size; cublasSetStream(cublas, stream); CublasMathModeSetter helper(cublas, CUBLAS_TENSOR_OP_MATH); // Concat past (2xBxNxS'xH) to present (2xBxNxS*xH): // past_k (BxNxS'xH) + k (BxNxSxH) => present_k (BxNxS*xH) // past_v (BxNxS'xH) + v (BxNxSxH) => present_v (BxNxS*xH) const int present_size_per_batch = all_sequence_length * head_size; if (nullptr != present) { if (!LaunchConcatPastToPresent(stream, all_sequence_length, sequence_length, batch_size, head_size, num_heads, past, k, present)) { return false; } // update pointers to present_k and present_v. k = present; v = present + batches * present_size_per_batch; } bool use_2d_attention_mask = (nullptr != mask_index && nullptr != mask_index_dims && mask_index_dims->size() == 2); // compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxS* // Q: BxNxSxH, K (present_k): BxNxS*xH, Q*K': BxNxSxS* const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size)); const int temp_matrix_size = sequence_length * all_sequence_length; T alpha = (T)(use_2d_attention_mask ? 
1.0f : rsqrt_head_size); if (!CUBLAS_CALL(CublasGemmStridedBatched( cublas, CUBLAS_OP_T, CUBLAS_OP_N, all_sequence_length, sequence_length, head_size, alpha, k, head_size, present_size_per_batch, q, head_size, size_per_batch, 0.f, scratch1, all_sequence_length, temp_matrix_size, batches))) { return false; } // apply softmax and store result P to scratch2: BxNxSxS* if (use_2d_attention_mask) { // 2d attention mask if (!ComputeSoftmaxWithMask2D<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, scratch1, scratch2, is_unidirectional, rsqrt_head_size)) { return false; } } else if (nullptr != mask_index) { // 1d mask index ORT_ENFORCE(nullptr != mask_index_dims && mask_index_dims->size() == 1); // mask_index has 1D shape: either (batch_size) or (2*batch_size). Only the later one has start postions. const int* mask_start = (mask_index_dims->at(0) > batch_size) ? mask_index + batch_size : nullptr; if (!ComputeSoftmaxWithMask1D<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, mask_start, scratch1, scratch2, is_unidirectional)) { return false; } } else { // no mask if (!ComputeSoftmax<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, scratch1, scratch2, is_unidirectional)) { return false; } } // compute P*V (as V*P), and store in scratch3: BxNxSxH if (!CUBLAS_CALL(CublasGemmStridedBatched( cublas, CUBLAS_OP_N, CUBLAS_OP_N, head_size, sequence_length, all_sequence_length, 1.f, v, head_size, present_size_per_batch, scratch2, all_sequence_length, temp_matrix_size, 0.f, scratch3, head_size, size_per_batch, batches))) { return false; } // scratch3 is BxNxSxH, transpose to output BxSxNxH return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads, scratch3, output); } bool LaunchAttentionKernel( const void* input, const int* mask_index, const std::vector<int64_t>* mask_index_dims, void* output, const int batch_size, const int sequence_length, const int num_heads, const int head_size, void* workspace, cublasHandle_t& cublas, const size_t element_size, bool is_unidirectional, int past_sequence_length, const void* past, void* present) { // use default stream const cudaStream_t stream = nullptr; if (element_size == 2) { return QkvToContext(cublas, stream, batch_size, sequence_length, num_heads, head_size, element_size, reinterpret_cast<const half*>(input), reinterpret_cast<half*>(output), reinterpret_cast<half*>(workspace), mask_index, mask_index_dims, is_unidirectional, past_sequence_length, reinterpret_cast<const half*>(past), reinterpret_cast<half*>(present)); } else { return QkvToContext(cublas, stream, batch_size, sequence_length, num_heads, head_size, element_size, reinterpret_cast<const float*>(input), reinterpret_cast<float*>(output), reinterpret_cast<float*>(workspace), mask_index, mask_index_dims, is_unidirectional, past_sequence_length, reinterpret_cast<const float*>(past), reinterpret_cast<float*>(present)); } } } // namespace cuda } // namespace contrib } // namespace onnxruntime
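Note on the file above: GetAttentionWorkspaceSize reserves one flat allocation that QkvToContext then carves into scratch1 and scratch2 (each a 256-byte-aligned BxNxSxS* probability buffer) followed by scratch3 (the 3xBxNxSxH transposed Q/K/V). A minimal host-only sketch of that size and offset arithmetic; the batch/head/sequence values below are arbitrary examples, not taken from the file:

// Illustrative only: how the single attention workspace is partitioned.
#include <cstddef>
#include <cstdio>

static size_t AlignTo256(size_t a) { return ((a + 255) / 256) * 256; }

int main() {
  const size_t element_size = 2;                      // fp16, hypothetical choice
  const int batch_size = 1, num_heads = 12, head_size = 64;
  const int sequence_length = 128, past_sequence_length = 0;
  const int all_sequence_length = past_sequence_length + sequence_length;

  // Mirrors ScratchSize: BxNxSxS* elements, rounded up to a 256-byte boundary.
  size_t scratch_bytes = AlignTo256((size_t)batch_size * num_heads *
                                    sequence_length * all_sequence_length * element_size);
  // Mirrors GetAttentionWorkspaceSize: QKV transpose buffer plus two scratch buffers.
  size_t qkv_bytes = 3ull * batch_size * sequence_length * num_heads * head_size * element_size;
  size_t total_bytes = qkv_bytes + 2 * scratch_bytes;

  printf("scratch1 byte offset: 0\n");
  printf("scratch2 byte offset: %zu\n", scratch_bytes);
  printf("scratch3 (QKV) byte offset: %zu\n", 2 * scratch_bytes);
  printf("total workspace bytes: %zu\n", total_bytes);
  return 0;
}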
23fe7c40bbd1e0d5b7ec0c512f800ba737c81218.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include "QueueHelpers.cu" //////////////////////////////////////////////////////////// // Constructor and Deconsturctor //////////////////////////////////////////////////////////// Queue CreateQueue(int MaxElements) { Queue Q = (Queue) malloc (sizeof(struct QueueRecord)); hipMalloc((void **)&(Q->Array), sizeof(JobDescription)*MaxElements); Q->Capacity = MaxElements; Q->Front = 1; Q->Rear = 0; Q->ReadLock = 0; Queue d_Q; hipMalloc(&d_Q, sizeof(struct QueueRecord)); cudaSafeMemcpy(d_Q, Q, sizeof(struct QueueRecord), hipMemcpyHostToDevice, stream_dataIn, "Copying initial queue to device"); free(Q); return d_Q; } void DisposeQueue(Queue d_Q) { Queue h_Q = (Queue) malloc(sizeof(struct QueueRecord)); cudaSafeMemcpy(h_Q, d_Q, sizeof(struct QueueRecord), hipMemcpyDeviceToHost, stream_dataIn, "DisposeQueue, Copying Queue to get array pointer"); hipFree(h_Q->Array); free(h_Q); hipFree(d_Q); } //////////////////////////////////////////////////////////// // Host Functions to Change Queues //////////////////////////////////////////////////////////// void EnqueueJob(JobDescription *h_JobDescription, Queue Q) { //called by CPU int copySize= sizeof(struct QueueRecord); Queue h_Q = (Queue) malloc(sizeof(struct QueueRecord)); cudaSafeMemcpy(h_Q, Q, copySize, hipMemcpyDeviceToHost, stream_dataIn, "EnqueueJob, Getting Queue"); /* printf("Queue Values at Enqueue\n"); printf(" Capacity, %d\n", h_Q->Capacity); printf(" Rear, %d\n", h_Q->Rear); printf(" Front, %d\n\n", h_Q->Front); */ while(h_IsFull(h_Q)){ pthread_yield(); cudaSafeMemcpy(h_Q, Q, copySize, hipMemcpyDeviceToHost, stream_dataIn, "EnqueueJob, Getting Queue again..."); } // floating point exception from mod capacity if 0 or -n h_Q->Rear = (h_Q->Rear+1)%(h_Q->Capacity); // set job description cudaSafeMemcpy( &h_Q->Array[h_Q->Rear], h_JobDescription, sizeof(JobDescription), hipMemcpyHostToDevice, stream_dataIn, "EnqueueJob, Writing Job Description"); cudaSafeMemcpy(movePointer(Q, 12), movePointer(h_Q, 12), sizeof(int), hipMemcpyHostToDevice, stream_dataIn, "EnqueueJob, Updating Queue"); free(h_Q); } JobDescription *FrontAndDequeueResult(Queue Q) { //called by CPU int copySize= sizeof(struct QueueRecord); Queue h_Q = (Queue) malloc(sizeof(struct QueueRecord)); cudaSafeMemcpy(h_Q, Q, copySize, hipMemcpyDeviceToHost, stream_dataOut, "FandDJob, Getting Queue"); /* printf("Queue Values at Dequeue\n"); printf(" Capacity, %d\n", h_Q->Capacity); printf(" Rear, %d\n", h_Q->Rear); printf(" Front, %d\n", h_Q->Front); */ while(h_IsEmpty(h_Q)){ pthread_yield(); cudaSafeMemcpy(h_Q, Q, copySize, hipMemcpyDeviceToHost, stream_dataOut, "FandDJob, Getting Queue again..."); } JobDescription *result = (JobDescription *) malloc(sizeof(JobDescription)); cudaSafeMemcpy(result, &h_Q->Array[h_Q->Front], sizeof(JobDescription), hipMemcpyDeviceToHost, stream_dataOut, "FandDJob, Getting Job Description"); h_Q->Front = (h_Q->Front+1)%(h_Q->Capacity); cudaSafeMemcpy( movePointer(Q, 16), movePointer(h_Q, 16), sizeof(int), hipMemcpyHostToDevice, stream_dataOut, "FandDJob, Updating Queue"); free(h_Q); return result; } //////////////////////////////////////////////////////////// // Device Functions to Change Queues //////////////////////////////////////////////////////////// __device__ void FrontAndDequeueJob(volatile Queue Q, volatile JobPointer Result) { //called by GPU getLock(Q); int count = 0; while(d_IsEmpty(Q))count++; volatile int *front = &Q->Front; *Result = Q->Array[*front]; *front = 
(*front+1)%(Q->Capacity); releaseLock(Q); } __device__ void EnqueueResult(volatile JobPointer X, volatile Queue Q) { //called by GPU getLock(Q); int count =0; while(d_IsFull(Q))count++; volatile int *rear = &Q->Rear; int temp = (*rear + 1)%(Q->Capacity); Q->Array[temp] = *X; *rear = temp; releaseLock(Q); }
23fe7c40bbd1e0d5b7ec0c512f800ba737c81218.cu
#include <stdlib.h> #include "QueueHelpers.cu" //////////////////////////////////////////////////////////// // Constructor and Deconsturctor //////////////////////////////////////////////////////////// Queue CreateQueue(int MaxElements) { Queue Q = (Queue) malloc (sizeof(struct QueueRecord)); cudaMalloc((void **)&(Q->Array), sizeof(JobDescription)*MaxElements); Q->Capacity = MaxElements; Q->Front = 1; Q->Rear = 0; Q->ReadLock = 0; Queue d_Q; cudaMalloc(&d_Q, sizeof(struct QueueRecord)); cudaSafeMemcpy(d_Q, Q, sizeof(struct QueueRecord), cudaMemcpyHostToDevice, stream_dataIn, "Copying initial queue to device"); free(Q); return d_Q; } void DisposeQueue(Queue d_Q) { Queue h_Q = (Queue) malloc(sizeof(struct QueueRecord)); cudaSafeMemcpy(h_Q, d_Q, sizeof(struct QueueRecord), cudaMemcpyDeviceToHost, stream_dataIn, "DisposeQueue, Copying Queue to get array pointer"); cudaFree(h_Q->Array); free(h_Q); cudaFree(d_Q); } //////////////////////////////////////////////////////////// // Host Functions to Change Queues //////////////////////////////////////////////////////////// void EnqueueJob(JobDescription *h_JobDescription, Queue Q) { //called by CPU int copySize= sizeof(struct QueueRecord); Queue h_Q = (Queue) malloc(sizeof(struct QueueRecord)); cudaSafeMemcpy(h_Q, Q, copySize, cudaMemcpyDeviceToHost, stream_dataIn, "EnqueueJob, Getting Queue"); /* printf("Queue Values at Enqueue\n"); printf(" Capacity, %d\n", h_Q->Capacity); printf(" Rear, %d\n", h_Q->Rear); printf(" Front, %d\n\n", h_Q->Front); */ while(h_IsFull(h_Q)){ pthread_yield(); cudaSafeMemcpy(h_Q, Q, copySize, cudaMemcpyDeviceToHost, stream_dataIn, "EnqueueJob, Getting Queue again..."); } // floating point exception from mod capacity if 0 or -n h_Q->Rear = (h_Q->Rear+1)%(h_Q->Capacity); // set job description cudaSafeMemcpy( &h_Q->Array[h_Q->Rear], h_JobDescription, sizeof(JobDescription), cudaMemcpyHostToDevice, stream_dataIn, "EnqueueJob, Writing Job Description"); cudaSafeMemcpy(movePointer(Q, 12), movePointer(h_Q, 12), sizeof(int), cudaMemcpyHostToDevice, stream_dataIn, "EnqueueJob, Updating Queue"); free(h_Q); } JobDescription *FrontAndDequeueResult(Queue Q) { //called by CPU int copySize= sizeof(struct QueueRecord); Queue h_Q = (Queue) malloc(sizeof(struct QueueRecord)); cudaSafeMemcpy(h_Q, Q, copySize, cudaMemcpyDeviceToHost, stream_dataOut, "FandDJob, Getting Queue"); /* printf("Queue Values at Dequeue\n"); printf(" Capacity, %d\n", h_Q->Capacity); printf(" Rear, %d\n", h_Q->Rear); printf(" Front, %d\n", h_Q->Front); */ while(h_IsEmpty(h_Q)){ pthread_yield(); cudaSafeMemcpy(h_Q, Q, copySize, cudaMemcpyDeviceToHost, stream_dataOut, "FandDJob, Getting Queue again..."); } JobDescription *result = (JobDescription *) malloc(sizeof(JobDescription)); cudaSafeMemcpy(result, &h_Q->Array[h_Q->Front], sizeof(JobDescription), cudaMemcpyDeviceToHost, stream_dataOut, "FandDJob, Getting Job Description"); h_Q->Front = (h_Q->Front+1)%(h_Q->Capacity); cudaSafeMemcpy( movePointer(Q, 16), movePointer(h_Q, 16), sizeof(int), cudaMemcpyHostToDevice, stream_dataOut, "FandDJob, Updating Queue"); free(h_Q); return result; } //////////////////////////////////////////////////////////// // Device Functions to Change Queues //////////////////////////////////////////////////////////// __device__ void FrontAndDequeueJob(volatile Queue Q, volatile JobPointer Result) { //called by GPU getLock(Q); int count = 0; while(d_IsEmpty(Q))count++; volatile int *front = &Q->Front; *Result = Q->Array[*front]; *front = (*front+1)%(Q->Capacity); releaseLock(Q); } __device__ 
void EnqueueResult(volatile JobPointer X, volatile Queue Q) { //called by GPU getLock(Q); int count =0; while(d_IsFull(Q))count++; volatile int *rear = &Q->Rear; int temp = (*rear + 1)%(Q->Capacity); Q->Array[temp] = *X; *rear = temp; releaseLock(Q); }
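The queue pair above (hipified and original CUDA) exposes a small host API: CreateQueue allocates the device-resident ring buffer, EnqueueJob yields until there is room and copies one JobDescription in, FrontAndDequeueResult yields until a result exists and returns a malloc'ed copy that the caller frees, and DisposeQueue releases both the array and the record. A host-side usage sketch, assuming it is compiled in the same translation unit so that Queue, JobDescription, the streams and the helpers from QueueHelpers.cu are all in scope:

#include <cstdlib>

// Sketch only: a round trip through the host-side queue API defined above.
void ExampleQueueRoundTrip() {
  Queue jobs    = CreateQueue(64);   // device-resident input queue
  Queue results = CreateQueue(64);   // device-resident output queue

  JobDescription job;                // populate the real fields here
  EnqueueJob(&job, jobs);            // yields while the queue is full, then copies to device

  // A persistent kernel would call FrontAndDequeueJob(jobs, ...) and
  // EnqueueResult(..., results) on the device side in between.

  JobDescription *done = FrontAndDequeueResult(results);  // waits until a result arrives
  free(done);                        // caller owns the malloc'ed copy

  DisposeQueue(jobs);
  DisposeQueue(results);
}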
25c053def2a4cf1f10f5a4fea948dd7e7564e1de.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> __global__ void MultMatrix(float *a, float *b, float *c, int n) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int sum = 0; for (int k = 0; k < n; k++) { sum += a[row*n + k] * b[k * n + col]; } c[row*n + col] = sum; } void imprimir(int *a, int *b, int *c, int n){ int i = 0, j = 0; for (i = 0; i < n; i++){ for (j = 0; j < n; j++){ printf("%d ", a[i*n + j]); } printf("\n"); } printf("\n"); for (i = 0; i < n; i++){ for (j = 0; j < n; j++){ printf("%d ", b[i*n + j]); } printf("\n"); } printf("\n"); for (i = 0; i < n; i++){ for (j = 0; j < n; j++){ printf("%d ", c[i*n + j]); } printf("\n"); } printf("\n"); } int main() { //Declarar ponteiros FILE *aFile; FILE *bFile; float *a, *b, *c; float *d_a, *d_b, *d_c; int i, j; int larguraA, alturaA, larguraB, alturaB; aFile = fopen("MatrizA.dat", "rb"); if (aFile == NULL) { fputs("File error", stderr); exit(1); } bFile = fopen("MatrizB.dat", "rb"); if (bFile == NULL) { fputs("File error", stderr); exit(1); } fread(&larguraA, sizeof(int), 1, aFile); fread(&alturaA, sizeof(int), 1, aFile); fread(&larguraB, sizeof(int), 1, bFile); fread(&alturaB, sizeof(int), 1, bFile); //Tamanhos const int n = alturaA; int size = n*n; int size_of = size*sizeof(float); //Reset GPU hipDeviceReset(); //Declarando variveis inciais a = (float *)malloc(size * sizeof(float)); b = (float *)malloc(size * sizeof(float)); c = (float *)malloc(size * sizeof(float)); //Declarando variveis na memria da GPU hipMalloc((void **)&d_a, size*sizeof(float)); hipMalloc((void **)&d_b, size*sizeof(float)); hipMalloc((void **)&d_c, size*sizeof(float)); //Gerar Matrizes for (i = 0; i < size; i++){ fread(&a[i], sizeof(float), 1, aFile); } for (i = 0; i < size; i++){ fread(&b[i], sizeof(float), 1, bFile); } //Copiando dados do Host para o Device hipMemcpy(d_a, a, size*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b, b, size*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_c, c, size*sizeof(float), hipMemcpyHostToDevice); dim3 threadsPerBlock(n, n, 1); dim3 dimGrid(1, 1, 1); if (n > 16){ threadsPerBlock.x = 16; threadsPerBlock.y = 16; threadsPerBlock.z = 1; dimGrid.x = n / threadsPerBlock.x; dimGrid.y = n / threadsPerBlock.y; dimGrid.z = 1; } //Chama funo no KERNEL hipLaunchKernelGGL(( MultMatrix), dim3(dimGrid), dim3(threadsPerBlock), 0, 0, d_a, d_b, d_c, n); //Sincronizando Threads hipDeviceSynchronize(); //Copiando o resultado(c) para o Host hipMemcpy(c, d_c, size_of, hipMemcpyDeviceToHost); //Testes //imprimir(a, b, c, n); //Free free(a); free(b); free(c); hipFree(d_a); hipFree(d_b); hipFree(d_c); system("pause"); return 0; }
25c053def2a4cf1f10f5a4fea948dd7e7564e1de.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> __global__ void MultMatrix(float *a, float *b, float *c, int n) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int sum = 0; for (int k = 0; k < n; k++) { sum += a[row*n + k] * b[k * n + col]; } c[row*n + col] = sum; } void imprimir(int *a, int *b, int *c, int n){ int i = 0, j = 0; for (i = 0; i < n; i++){ for (j = 0; j < n; j++){ printf("%d ", a[i*n + j]); } printf("\n"); } printf("\n"); for (i = 0; i < n; i++){ for (j = 0; j < n; j++){ printf("%d ", b[i*n + j]); } printf("\n"); } printf("\n"); for (i = 0; i < n; i++){ for (j = 0; j < n; j++){ printf("%d ", c[i*n + j]); } printf("\n"); } printf("\n"); } int main() { //Declarar ponteiros FILE *aFile; FILE *bFile; float *a, *b, *c; float *d_a, *d_b, *d_c; int i, j; int larguraA, alturaA, larguraB, alturaB; aFile = fopen("MatrizA.dat", "rb"); if (aFile == NULL) { fputs("File error", stderr); exit(1); } bFile = fopen("MatrizB.dat", "rb"); if (bFile == NULL) { fputs("File error", stderr); exit(1); } fread(&larguraA, sizeof(int), 1, aFile); fread(&alturaA, sizeof(int), 1, aFile); fread(&larguraB, sizeof(int), 1, bFile); fread(&alturaB, sizeof(int), 1, bFile); //Tamanhos const int n = alturaA; int size = n*n; int size_of = size*sizeof(float); //Reset GPU cudaDeviceReset(); //Declarando variáveis inciais a = (float *)malloc(size * sizeof(float)); b = (float *)malloc(size * sizeof(float)); c = (float *)malloc(size * sizeof(float)); //Declarando variáveis na memória da GPU cudaMalloc((void **)&d_a, size*sizeof(float)); cudaMalloc((void **)&d_b, size*sizeof(float)); cudaMalloc((void **)&d_c, size*sizeof(float)); //Gerar Matrizes for (i = 0; i < size; i++){ fread(&a[i], sizeof(float), 1, aFile); } for (i = 0; i < size; i++){ fread(&b[i], sizeof(float), 1, bFile); } //Copiando dados do Host para o Device cudaMemcpy(d_a, a, size*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, size*sizeof(float), cudaMemcpyHostToDevice); dim3 threadsPerBlock(n, n, 1); dim3 dimGrid(1, 1, 1); if (n > 16){ threadsPerBlock.x = 16; threadsPerBlock.y = 16; threadsPerBlock.z = 1; dimGrid.x = n / threadsPerBlock.x; dimGrid.y = n / threadsPerBlock.y; dimGrid.z = 1; } //Chama função no KERNEL MultMatrix<<<dimGrid, threadsPerBlock>>>(d_a, d_b, d_c, n); //Sincronizando Threads cudaDeviceSynchronize(); //Copiando o resultado(c) para o Host cudaMemcpy(c, d_c, size_of, cudaMemcpyDeviceToHost); //Testes //imprimir(a, b, c, n); //Free free(a); free(b); free(c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); system("pause"); return 0; }
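One detail worth flagging in the MultMatrix kernel shared by both versions of this file: the accumulator is declared as int, so every float partial product is truncated as it is added, and the value written to c is an integer. There is also no bounds check, and the host grid of n / threadsPerBlock.x rounds down, so when n is not a multiple of 16 the last rows and columns of c are never computed. A corrected sketch of just the kernel, keeping the original indexing:

__global__ void MultMatrixFixed(const float *a, const float *b, float *c, int n) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= n || col >= n) return;   // guard threads outside the matrix
  float sum = 0.0f;                   // accumulate in float, not int
  for (int k = 0; k < n; k++) {
    sum += a[row * n + k] * b[k * n + col];
  }
  c[row * n + col] = sum;
}

With the guard in place, the host side can round the grid up, e.g. dimGrid.x = (n + threadsPerBlock.x - 1) / threadsPerBlock.x, so partial tiles are still covered.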
06c091452009177d0892636fd9161849f89d7eaf.hip
// !!! This is a file automatically generated by hipify!!! #include "./c_runtime_api.h" #include <cassert> #include <cstdio> #include <rocblas.h> #include <hip/hip_runtime.h> #define BLOCK_SIZE 16 __global__ void array_set_kernel(int nrow, int ncol, float* input, float value) { int c_ = blockIdx.x * blockDim.x + threadIdx.x; int r_ = blockIdx.y * blockDim.y + threadIdx.y; if (r_ >= nrow || c_ >= ncol) return; input[r_ * ncol + c_] = value; } int DLGpuArraySet(DLArrayHandle arr, float value) { int nrow = arr->shape[0]; int ncol = arr->shape[1]; float* input_data = (float*)arr->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( array_set_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, nrow, ncol, input_data, value); return 0; } __global__ void broadcast_to_kernel(int ntimes, int nnum, const float* input, float* output) { int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= nnum) return; for (int i = 0; i < ntimes; i++) { float* output_ = output + nnum * i; output_[n] = input[n]; } } int DLGpuBroadcastTo(const DLArrayHandle input, DLArrayHandle output) { int ntimes = output->shape[0]; int nnum = 1; for (int i = 1; i < output->ndim; i++) nnum *= output->shape[i]; const float* input_data = (const float*)input->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid((nnum + dimBlock.x - 1) / dimBlock.x); hipLaunchKernelGGL(( broadcast_to_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ntimes, nnum, input_data, output_data); return 0; } __global__ void reduce_sum_axis_zero_kernel(int reduce_n, int remain_n, const float* input, float* output) { int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= remain_n) return; float sum = 0.0; for (int i = 0; i < reduce_n; i++) { const float* input_ = input + remain_n * i; sum += input_[n]; } output[n] = sum; } int DLGpuReduceSumAxisZero(const DLArrayHandle input, DLArrayHandle output) { int reduce_n = input->shape[0]; int remain_n = 1; for (int i = 1; i < input->ndim; i++) { remain_n *= input->shape[i]; } const float* input_data = (const float*)input->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid((remain_n + dimBlock.x- 1) / dimBlock.x); hipLaunchKernelGGL(( reduce_sum_axis_zero_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, reduce_n, remain_n, input_data, output_data); return 0; } __global__ void matrix_elementwise_add_kernel(int nrow, int ncol, const float* input_a, const float* input_b, float* output) { int r_ = blockDim.y * blockIdx.y + threadIdx.y; int c_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow || c_ >= ncol) return; output[r_ * ncol + c_] = input_a[r_ * ncol + c_] + input_b[r_ * ncol + c_]; } int DLGpuMatrixElementwiseAdd(const DLArrayHandle matA, const DLArrayHandle matB, DLArrayHandle output) { int nrow = matA->shape[0]; int ncol = matA->shape[1]; const float* input_data_a = (const float*)matA->data; const float* input_data_b = (const float*)matB->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( matrix_elementwise_add_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, nrow, ncol, input_data_a, input_data_b, output_data); return 0; } __global__ void matrix_elementwise_add_const_kernel(int nrow, int ncol, const float* input, float val, float* output) { int r_ = blockDim.y * blockIdx.y + threadIdx.y; int c_ = 
blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow || c_ >= ncol) return; output[r_ * ncol + c_] = input[r_ * ncol + c_] + val; } int DLGpuMatrixElementwiseAddByConst(const DLArrayHandle input, float val, DLArrayHandle output) { int nrow = input->shape[0]; int ncol = input->shape[1]; const float* input_data = (const float*)input->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( matrix_elementwise_add_const_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, nrow, ncol, input_data, val, output_data); return 0; } __global__ void matrix_elementwise_multiply_kernel(int nrow, int ncol, const float* input_a, const float* input_b, float* output) { int r_ = blockDim.y * blockIdx.y + threadIdx.y; int c_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow || c_ >= ncol) return; output[r_ * ncol + c_] = input_a[r_ * ncol + c_] * input_b[r_ * ncol + c_]; } int DLGpuMatrixElementwiseMultiply(const DLArrayHandle matA, const DLArrayHandle matB, DLArrayHandle output) { int nrow = matA->shape[0]; int ncol = matA->shape[1]; const float* input_data_a = (const float*)matA->data; const float* input_data_b = (const float*)matB->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( matrix_elementwise_multiply_kernel), dim3(dimBlock), dim3(dimGrid), 0, 0, nrow, ncol, input_data_a, input_data_b, output_data); return 0; } __global__ void matrix_multiply_const(int nrow, int ncol, const float* input, float val, float* output) { int r_ = blockDim.y * blockIdx.y + threadIdx.y; int c_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow || c_ >= ncol) return; output[r_ * ncol + c_] = input[r_ * ncol + c_] * val; } int DLGpuMatrixMultiplyByConst(const DLArrayHandle input, float val, DLArrayHandle output) { int nrow = input->shape[0]; int ncol = input->shape[1]; const float* input_data = (const float*)input->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( matrix_multiply_const), dim3(dimGrid), dim3(dimBlock), 0, 0, nrow, ncol, input_data, val, output_data); return 0; } __global__ void matrix_multiply_kernel(const float* input_a, const float* input_b, bool transposeA, bool transposeB, int nrow_a, int ncol_a, int nrow_b, int ncol_b, int nrow, int ncol, int nwidth, float* output) { int r_ = blockDim.y * blockIdx.y + threadIdx.y; int c_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow || c_ >= ncol) return; float a = 0.0, b = 0.0, sum = 0.0; for (int i = 0; i < nwidth; i++) { a = transposeA ? input_a[ncol_a * i + r_] : input_a[ncol_a * r_ + i]; b = transposeB ? input_b[ncol_b * c_ + i] : input_b[ncol_b * i + c_]; sum += a * b; } output[ncol * r_ + c_] = sum; return; } int DLGpuMatrixMultiply(const DLArrayHandle matA, bool transposeA, const DLArrayHandle matB, bool transposeB, DLArrayHandle matC) { int nrow_a = matA->shape[0], ncol_a = matA->shape[1]; int nrow_b = matB->shape[0], ncol_b = matB->shape[1]; // nrow is the number of row of result matrix; int nrow = transposeA ? ncol_a : nrow_a; // ncol is the number of col of result matrix; int ncol = transposeB ? nrow_b : ncol_b; int nwidth = transposeA ? 
nrow_a : ncol_a; const float* input_data_a = (const float*)matA->data; const float* input_data_b = (const float*)matB->data; float* output_data = (float*)matC->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( matrix_multiply_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, input_data_a, input_data_b, transposeA, transposeB, nrow_a, ncol_a, nrow_b, ncol_b, nrow, ncol, nwidth, output_data); return 0; } __global__ void relu_kernel(int nrow, int ncol, const float* input, float* output) { int r_ = blockDim.y * blockIdx.y + threadIdx.y; int c_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow || c_ >= ncol) return; float val = input[r_ * ncol + c_]; output[r_ * ncol + c_] = val >= 0 ? val : 0; } int DLGpuRelu(const DLArrayHandle input, DLArrayHandle output) { int nrow = input->shape[0]; int ncol = input->shape[1]; const float* input_data = (const float*)input->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol+ dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( relu_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, nrow, ncol, input_data, output_data); return 0; } __global__ void relu_gradient_kernel(int nrow, int ncol, const float* input, const float* in_grad, float* output) { int r_ = blockDim.y * blockIdx.y + threadIdx.y; int c_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow || c_ >= ncol) return; float input_val = input[r_ * ncol + c_]; float in_grad_val = in_grad[r_ * ncol + c_]; output[r_ * ncol + c_] = input_val >= 0 ? in_grad_val : 0; } int DLGpuReluGradient(const DLArrayHandle input, const DLArrayHandle in_grad, DLArrayHandle output) { int nrow = input->shape[0]; int ncol = input->shape[1]; const float* input_data = (const float*)input->data; const float* in_grad_data = (const float*)in_grad->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( relu_gradient_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, nrow, ncol, input_data, in_grad_data, output_data); return 0; } __global__ void softmax_kernel(int nrow, int ncol, const float* input, float* output) { int r_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow) return; input += r_ * ncol; float min_val = input[0]; for (int i = 1; i < ncol; i++) min_val = min(min_val, input[i]); float sum = 0.0; for (int i = 0; i < ncol; i++) sum += exp(input[i] - min_val); output += r_ * ncol; for (int i = 0; i < ncol; i++) output[i] = exp(input[i] - min_val) / sum; } int DLGpuSoftmax(const DLArrayHandle input, DLArrayHandle output) { int nrow = input->shape[0]; int ncol = input->shape[1]; const float* input_data = (const float*)input->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid((nrow + dimBlock.x - 1) / dimBlock.x); hipLaunchKernelGGL(( softmax_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, nrow, ncol, input_data, output_data); return 0; } // y = inputs[0], y_ = inputs[1] // np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True) __global__ void matrix_softmax_cross_entropy_kernel(int nrow, int ncol, const float *input_a, const float *input_b, float *output) { // Dynamic shared memory, size provided at kernel launch. extern __shared__ float loss_per_row[]; // Two dimensional thread blocks. 
int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; if (y >= nrow) { return; } input_a += y * ncol; input_b += y * ncol; float maxval = *input_a; // Find max for a row. for (int x = 1; x < ncol; ++x) { maxval = max(maxval, input_a[x]); } // Deduct by max for a row, and raise to exp. float sum = 0; for (int x = 0; x < ncol; ++x) { sum += exp(input_a[x] - maxval); } // Compute per-row loss. float loss = 0; for (int x = 0; x < ncol; ++x) { loss -= input_b[x] * log(exp(input_a[x] - maxval) / sum); } loss_per_row[y] = loss; __syncthreads(); // Compute reduce_mean across rows. float mean_loss = 0; // Use a single thread to reduce mean across rows. if ((threadIdx.x == 0) && (threadIdx.y == 0)) { for (int i = 0; i < nrow; ++i) { mean_loss += loss_per_row[i]; } mean_loss /= nrow; output[0] = mean_loss; } } int DLGpuSoftmaxCrossEntropy(const DLArrayHandle input_a, const DLArrayHandle input_b, DLArrayHandle output) { assert(input_a->ndim == 2); assert(input_b->ndim == 2); assert(output->ndim == 1); assert(input_a->shape[0] == input_b->shape[0] && input_a->shape[1] == input_b->shape[1]); int nrow = input_a->shape[0]; // Maximum x- or y-dimension of a block = 1024 // But we need 'nrow' shared memory, and max shared memory is 48KB. // Conservatively allow max 16KB shared memory. assert(nrow <= 1024 * 4); int ncol = input_a->shape[1]; const float *input_data_a = (const float *)input_a->data; const float *input_data_b = (const float *)input_b->data; float *output_data = (float *)output->data; dim3 threads; if (nrow <= 1024) { threads.x = nrow; } else { threads.x = 1024; threads.y = (nrow + 1023) / 1024; } // 1 block, each block with 'threads' number of threads with 'nrow' shared // memory size hipLaunchKernelGGL(( matrix_softmax_cross_entropy_kernel), dim3(1), dim3(threads), nrow * sizeof(float), 0, nrow, ncol, input_data_a, input_data_b, output_data); return 0; }
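A note on softmax_kernel in this file (the same code appears in the original .cu that follows): it subtracts the row minimum before exponentiating, but subtracting the minimum does not bound the exponent from above, so exp can still overflow for large positive inputs. Subtracting the row maximum keeps every exponent at or below zero, which is what matrix_softmax_cross_entropy_kernel in the same file already does with maxval. A sketch of the kernel with that one change:

__global__ void softmax_kernel_stable(int nrow, int ncol,
                                      const float* input, float* output) {
  int r_ = blockDim.x * blockIdx.x + threadIdx.x;
  if (r_ >= nrow) return;
  input  += r_ * ncol;
  output += r_ * ncol;
  float max_val = input[0];
  for (int i = 1; i < ncol; i++) max_val = max(max_val, input[i]);  // row maximum
  float sum = 0.0f;
  for (int i = 0; i < ncol; i++) sum += exp(input[i] - max_val);    // exponents are <= 0
  for (int i = 0; i < ncol; i++) output[i] = exp(input[i] - max_val) / sum;
}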
06c091452009177d0892636fd9161849f89d7eaf.cu
#include "./c_runtime_api.h" #include <cassert> #include <cstdio> #include <cublas_v2.h> #include <cuda_runtime.h> #define BLOCK_SIZE 16 __global__ void array_set_kernel(int nrow, int ncol, float* input, float value) { int c_ = blockIdx.x * blockDim.x + threadIdx.x; int r_ = blockIdx.y * blockDim.y + threadIdx.y; if (r_ >= nrow || c_ >= ncol) return; input[r_ * ncol + c_] = value; } int DLGpuArraySet(DLArrayHandle arr, float value) { int nrow = arr->shape[0]; int ncol = arr->shape[1]; float* input_data = (float*)arr->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); array_set_kernel<<<dimGrid, dimBlock>>>(nrow, ncol, input_data, value); return 0; } __global__ void broadcast_to_kernel(int ntimes, int nnum, const float* input, float* output) { int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= nnum) return; for (int i = 0; i < ntimes; i++) { float* output_ = output + nnum * i; output_[n] = input[n]; } } int DLGpuBroadcastTo(const DLArrayHandle input, DLArrayHandle output) { int ntimes = output->shape[0]; int nnum = 1; for (int i = 1; i < output->ndim; i++) nnum *= output->shape[i]; const float* input_data = (const float*)input->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid((nnum + dimBlock.x - 1) / dimBlock.x); broadcast_to_kernel<<<dimGrid, dimBlock>>>( ntimes, nnum, input_data, output_data); return 0; } __global__ void reduce_sum_axis_zero_kernel(int reduce_n, int remain_n, const float* input, float* output) { int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= remain_n) return; float sum = 0.0; for (int i = 0; i < reduce_n; i++) { const float* input_ = input + remain_n * i; sum += input_[n]; } output[n] = sum; } int DLGpuReduceSumAxisZero(const DLArrayHandle input, DLArrayHandle output) { int reduce_n = input->shape[0]; int remain_n = 1; for (int i = 1; i < input->ndim; i++) { remain_n *= input->shape[i]; } const float* input_data = (const float*)input->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid((remain_n + dimBlock.x- 1) / dimBlock.x); reduce_sum_axis_zero_kernel<<<dimGrid, dimBlock>>>( reduce_n, remain_n, input_data, output_data); return 0; } __global__ void matrix_elementwise_add_kernel(int nrow, int ncol, const float* input_a, const float* input_b, float* output) { int r_ = blockDim.y * blockIdx.y + threadIdx.y; int c_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow || c_ >= ncol) return; output[r_ * ncol + c_] = input_a[r_ * ncol + c_] + input_b[r_ * ncol + c_]; } int DLGpuMatrixElementwiseAdd(const DLArrayHandle matA, const DLArrayHandle matB, DLArrayHandle output) { int nrow = matA->shape[0]; int ncol = matA->shape[1]; const float* input_data_a = (const float*)matA->data; const float* input_data_b = (const float*)matB->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); matrix_elementwise_add_kernel<<<dimGrid, dimBlock>>>( nrow, ncol, input_data_a, input_data_b, output_data); return 0; } __global__ void matrix_elementwise_add_const_kernel(int nrow, int ncol, const float* input, float val, float* output) { int r_ = blockDim.y * blockIdx.y + threadIdx.y; int c_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow || c_ >= ncol) return; output[r_ * ncol + c_] = input[r_ * ncol + c_] + val; } int DLGpuMatrixElementwiseAddByConst(const DLArrayHandle input, float val, 
DLArrayHandle output) { int nrow = input->shape[0]; int ncol = input->shape[1]; const float* input_data = (const float*)input->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); matrix_elementwise_add_const_kernel<<<dimGrid, dimBlock>>>( nrow, ncol, input_data, val, output_data); return 0; } __global__ void matrix_elementwise_multiply_kernel(int nrow, int ncol, const float* input_a, const float* input_b, float* output) { int r_ = blockDim.y * blockIdx.y + threadIdx.y; int c_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow || c_ >= ncol) return; output[r_ * ncol + c_] = input_a[r_ * ncol + c_] * input_b[r_ * ncol + c_]; } int DLGpuMatrixElementwiseMultiply(const DLArrayHandle matA, const DLArrayHandle matB, DLArrayHandle output) { int nrow = matA->shape[0]; int ncol = matA->shape[1]; const float* input_data_a = (const float*)matA->data; const float* input_data_b = (const float*)matB->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); matrix_elementwise_multiply_kernel<<<dimBlock, dimGrid>>>( nrow, ncol, input_data_a, input_data_b, output_data); return 0; } __global__ void matrix_multiply_const(int nrow, int ncol, const float* input, float val, float* output) { int r_ = blockDim.y * blockIdx.y + threadIdx.y; int c_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow || c_ >= ncol) return; output[r_ * ncol + c_] = input[r_ * ncol + c_] * val; } int DLGpuMatrixMultiplyByConst(const DLArrayHandle input, float val, DLArrayHandle output) { int nrow = input->shape[0]; int ncol = input->shape[1]; const float* input_data = (const float*)input->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); matrix_multiply_const<<<dimGrid, dimBlock>>>( nrow, ncol, input_data, val, output_data); return 0; } __global__ void matrix_multiply_kernel(const float* input_a, const float* input_b, bool transposeA, bool transposeB, int nrow_a, int ncol_a, int nrow_b, int ncol_b, int nrow, int ncol, int nwidth, float* output) { int r_ = blockDim.y * blockIdx.y + threadIdx.y; int c_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow || c_ >= ncol) return; float a = 0.0, b = 0.0, sum = 0.0; for (int i = 0; i < nwidth; i++) { a = transposeA ? input_a[ncol_a * i + r_] : input_a[ncol_a * r_ + i]; b = transposeB ? input_b[ncol_b * c_ + i] : input_b[ncol_b * i + c_]; sum += a * b; } output[ncol * r_ + c_] = sum; return; } int DLGpuMatrixMultiply(const DLArrayHandle matA, bool transposeA, const DLArrayHandle matB, bool transposeB, DLArrayHandle matC) { int nrow_a = matA->shape[0], ncol_a = matA->shape[1]; int nrow_b = matB->shape[0], ncol_b = matB->shape[1]; // nrow is the number of row of result matrix; int nrow = transposeA ? ncol_a : nrow_a; // ncol is the number of col of result matrix; int ncol = transposeB ? nrow_b : ncol_b; int nwidth = transposeA ? 
nrow_a : ncol_a; const float* input_data_a = (const float*)matA->data; const float* input_data_b = (const float*)matB->data; float* output_data = (float*)matC->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); matrix_multiply_kernel<<<dimGrid, dimBlock>>>( input_data_a, input_data_b, transposeA, transposeB, nrow_a, ncol_a, nrow_b, ncol_b, nrow, ncol, nwidth, output_data); return 0; } __global__ void relu_kernel(int nrow, int ncol, const float* input, float* output) { int r_ = blockDim.y * blockIdx.y + threadIdx.y; int c_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow || c_ >= ncol) return; float val = input[r_ * ncol + c_]; output[r_ * ncol + c_] = val >= 0 ? val : 0; } int DLGpuRelu(const DLArrayHandle input, DLArrayHandle output) { int nrow = input->shape[0]; int ncol = input->shape[1]; const float* input_data = (const float*)input->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol+ dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); relu_kernel<<<dimGrid, dimBlock>>>( nrow, ncol, input_data, output_data); return 0; } __global__ void relu_gradient_kernel(int nrow, int ncol, const float* input, const float* in_grad, float* output) { int r_ = blockDim.y * blockIdx.y + threadIdx.y; int c_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow || c_ >= ncol) return; float input_val = input[r_ * ncol + c_]; float in_grad_val = in_grad[r_ * ncol + c_]; output[r_ * ncol + c_] = input_val >= 0 ? in_grad_val : 0; } int DLGpuReluGradient(const DLArrayHandle input, const DLArrayHandle in_grad, DLArrayHandle output) { int nrow = input->shape[0]; int ncol = input->shape[1]; const float* input_data = (const float*)input->data; const float* in_grad_data = (const float*)in_grad->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x, (nrow + dimBlock.y - 1) / dimBlock.y); relu_gradient_kernel<<<dimGrid, dimBlock>>>( nrow, ncol, input_data, in_grad_data, output_data); return 0; } __global__ void softmax_kernel(int nrow, int ncol, const float* input, float* output) { int r_ = blockDim.x * blockIdx.x + threadIdx.x; if (r_ >= nrow) return; input += r_ * ncol; float min_val = input[0]; for (int i = 1; i < ncol; i++) min_val = min(min_val, input[i]); float sum = 0.0; for (int i = 0; i < ncol; i++) sum += exp(input[i] - min_val); output += r_ * ncol; for (int i = 0; i < ncol; i++) output[i] = exp(input[i] - min_val) / sum; } int DLGpuSoftmax(const DLArrayHandle input, DLArrayHandle output) { int nrow = input->shape[0]; int ncol = input->shape[1]; const float* input_data = (const float*)input->data; float* output_data = (float*)output->data; dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid((nrow + dimBlock.x - 1) / dimBlock.x); softmax_kernel<<<dimGrid, dimBlock>>>( nrow, ncol, input_data, output_data); return 0; } // y = inputs[0], y_ = inputs[1] // np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True) __global__ void matrix_softmax_cross_entropy_kernel(int nrow, int ncol, const float *input_a, const float *input_b, float *output) { // Dynamic shared memory, size provided at kernel launch. extern __shared__ float loss_per_row[]; // Two dimensional thread blocks. 
int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; if (y >= nrow) { return; } input_a += y * ncol; input_b += y * ncol; float maxval = *input_a; // Find max for a row. for (int x = 1; x < ncol; ++x) { maxval = max(maxval, input_a[x]); } // Deduct by max for a row, and raise to exp. float sum = 0; for (int x = 0; x < ncol; ++x) { sum += exp(input_a[x] - maxval); } // Compute per-row loss. float loss = 0; for (int x = 0; x < ncol; ++x) { loss -= input_b[x] * log(exp(input_a[x] - maxval) / sum); } loss_per_row[y] = loss; __syncthreads(); // Compute reduce_mean across rows. float mean_loss = 0; // Use a single thread to reduce mean across rows. if ((threadIdx.x == 0) && (threadIdx.y == 0)) { for (int i = 0; i < nrow; ++i) { mean_loss += loss_per_row[i]; } mean_loss /= nrow; output[0] = mean_loss; } } int DLGpuSoftmaxCrossEntropy(const DLArrayHandle input_a, const DLArrayHandle input_b, DLArrayHandle output) { assert(input_a->ndim == 2); assert(input_b->ndim == 2); assert(output->ndim == 1); assert(input_a->shape[0] == input_b->shape[0] && input_a->shape[1] == input_b->shape[1]); int nrow = input_a->shape[0]; // Maximum x- or y-dimension of a block = 1024 // But we need 'nrow' shared memory, and max shared memory is 48KB. // Conservatively allow max 16KB shared memory. assert(nrow <= 1024 * 4); int ncol = input_a->shape[1]; const float *input_data_a = (const float *)input_a->data; const float *input_data_b = (const float *)input_b->data; float *output_data = (float *)output->data; dim3 threads; if (nrow <= 1024) { threads.x = nrow; } else { threads.x = 1024; threads.y = (nrow + 1023) / 1024; } // 1 block, each block with 'threads' number of threads with 'nrow' shared // memory size matrix_softmax_cross_entropy_kernel<<<1, threads, nrow * sizeof(float)>>>( nrow, ncol, input_data_a, input_data_b, output_data); return 0; }
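The file above applies one launch recipe to every elementwise op: a BLOCK_SIZE x BLOCK_SIZE thread block, a grid sized with ceiling division over the matrix dimensions, a bounds guard inside the kernel, and row-major indexing, with the grid passed as the first launch argument and the block as the second. The standalone sketch below is not part of the dataset record; it restates that pattern with hypothetical names (BLOCK_SIZE, elementwise_add_kernel) outside the DLArrayHandle wrappers.

#include <cstdio>
#include <cuda_runtime.h>

#define BLOCK_SIZE 16  // hypothetical tile edge, in the style of the record above

// Elementwise C = A + B over an nrow x ncol row-major matrix.
__global__ void elementwise_add_kernel(int nrow, int ncol,
                                       const float* a, const float* b, float* c) {
  int r = blockDim.y * blockIdx.y + threadIdx.y;   // row index
  int col = blockDim.x * blockIdx.x + threadIdx.x; // column index
  if (r >= nrow || col >= ncol) return;            // guard the ragged edge
  c[r * ncol + col] = a[r * ncol + col] + b[r * ncol + col];
}

int main() {
  const int nrow = 100, ncol = 37;                 // deliberately not multiples of BLOCK_SIZE
  const size_t bytes = nrow * ncol * sizeof(float);
  float* h = (float*)malloc(bytes);
  for (int i = 0; i < nrow * ncol; ++i) h[i] = 1.0f;

  float *dA, *dB, *dC;
  cudaMalloc(&dA, bytes); cudaMalloc(&dB, bytes); cudaMalloc(&dC, bytes);
  cudaMemcpy(dA, h, bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(dB, h, bytes, cudaMemcpyHostToDevice);

  // Ceiling division so partial tiles on the right/bottom edges are still covered.
  dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
  dim3 dimGrid((ncol + dimBlock.x - 1) / dimBlock.x,
               (nrow + dimBlock.y - 1) / dimBlock.y);
  elementwise_add_kernel<<<dimGrid, dimBlock>>>(nrow, ncol, dA, dB, dC);  // grid first, block second
  cudaDeviceSynchronize();

  cudaMemcpy(h, dC, bytes, cudaMemcpyDeviceToHost);
  printf("c[0] = %f (expected 2.0)\n", h[0]);
  cudaFree(dA); cudaFree(dB); cudaFree(dC); free(h);
  return 0;
}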
687223d6a76882cf4185578554b1bdb21cb74226.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stdafx.h" #include "Voxtrace.h" #include "RayCamera.h" #include "Renderer.h" #include <Engine.h> #include <Pipeline.h> #include <camera.h> #include <Line.h> #include <shader.h> #include <vbo.h> #include <vao.h> #include "CommonDevice.cuh" #include "cuda_gl_interop.h" //#include "pick.h" #include "Randy.h" #include "TraceInfo.h" #include "RTKernel.cuh" namespace Voxels { namespace { LinePool* lines = nullptr; // world description Block* blocks = nullptr; glm::ivec3 chunkDim = { 10, 10, 10 }; int numBlocks = chunkDim.x * chunkDim.y * chunkDim.z; // screen info //glm::vec2 screenDim = { 500, 265 }; //glm::vec2 screenDim = { 1920, 1080 }; // 1080p //glm::vec2 screenDim = { 1280, 720 }; // 720p glm::vec2 screenDim = { 853, 480 }; // 480p //glm::vec2 screenDim = { 125, 65 }; TraceInfo info; float fovDeg = 60.0f; // rendering shiz VBO* vbo = nullptr; VAO* vao = nullptr; GLuint screenTexture = -1; // cuda GL stuff cudaGraphicsResource* imageResource = nullptr; hipArray* arr = nullptr; const int KernelBlockSize = 256; const int KernelNumBlocks = (screenDim.x * screenDim.y + KernelBlockSize - 1) / KernelBlockSize; hiprandState_t* states; } void Init() { Engine::PushRenderCallback(Render, 4); InitGLStuff(); InitBlocks(); //InitCUDARand(screenDim.x * screenDim.y); InitCUDARand(states, KernelBlockSize * KernelNumBlocks); info.imgSize = screenDim; info.numShadowRays = 10; auto c = Renderer::GetPipeline()->GetCamera(0); c->GenProjection(fovDeg); } void Shutdown() { ShutdownCUDARands(states); } void InitGLStuff() { // TODO: move this to Vertices.h or something float quadVertices[] = { // positions // texture Coords -1.0f, 1.0f, 0.0f, 0.0f, 1.0f, -1.0f, -1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, 0.0f, }; // setup screen texture pointers vbo = new VBO(&quadVertices[0], sizeof(quadVertices), GL_STATIC_DRAW); VBOlayout layout; layout.Push<float>(3); // pos layout.Push<float>(2); // texcoord vao = new VAO(); vao->AddBuffer(*vbo, layout); // generate screen texture memory glGenTextures(1, &screenTexture); glBindTexture(GL_TEXTURE_2D, screenTexture); // cuda behavior becomes extremely weird when using RGB textures with it glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, screenDim.x, screenDim.y, 0, GL_RGBA, GL_FLOAT, NULL); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); glBindTexture(GL_TEXTURE_2D, 0); // init texture color glm::vec4 defColor(1, .1, .1, 1.); glClearTexImage(screenTexture, 0, GL_RGBA, GL_FLOAT, &defColor[0]); // register the texture as a cuda resource cudaCheck( hipGraphicsGLRegisterImage(&imageResource, screenTexture, GL_TEXTURE_2D, hipGraphicsRegisterFlagsSurfaceLoadStore)); } void InitBlocks() { // shared memory so that the GPU and CPU can both read and write blocks cudaCheck(hipMallocManaged(&blocks, numBlocks * sizeof(Block))); for (int i = 0; i < numBlocks; i++) { blocks[i].alpha = 0; blocks[i].n = 1.5f; auto pos = expand(i, chunkDim.x, chunkDim.y); //if (glm::all(glm::lessThan(pos, { 5, 5, 5 }))) { blocks[i].alpha = rand() % 100 > 80 ? 
1 : 0; if (rand() % 100 > 95) blocks[i].reflect = true; if (rand() % 100 > 90) { blocks[i].refract = true; blocks[i].reflect = false; } //blocks[i].diffuse = { 1, 0, 0 }; } blocks[i].diffuse = Utils::get_random_vec3_r(0, 1); } } void Render() { auto c = Renderer::GetPipeline()->GetCamera(0); info.camera = PerspectiveRayCamera(c->GetPos(), c->GetPos() + c->GetDir(), glm::vec3(0, 1, 0), glm::radians(fovDeg / 2.0f), screenDim.x / screenDim.y); static Camera oldCam = *Renderer::GetPipeline()->GetCamera(0); bool dirtyCam = c->GetDir() != oldCam.GetDir() || c->GetPos() != oldCam.GetPos(); oldCam = *Renderer::GetPipeline()->GetCamera(0); // ray trace her { cudaCheck(hipGraphicsMapResources(1, &imageResource, 0)); cudaCheck(hipGraphicsSubResourceGetMappedArray(&arr, imageResource, 0, 0)); cudaCheck(hipBindSurfaceToArray(GetScreenSurface(), arr)); // passing the info struct in creates crashes when calling info.camera.makeRay hipLaunchKernelGGL(( epicRayTracer), dim3(KernelNumBlocks), dim3(KernelBlockSize), 0, 0, blocks, chunkDim, info.camera, info.numShadowRays, info.imgSize, chunkDim, *Renderer::Sun(), states, dirtyCam); hipDeviceSynchronize(); cudaCheck(hipGraphicsUnmapResources(1, &imageResource, 0)); } // draw fullscreen quad ShaderPtr s = Shader::shaders["fullscreen"]; s->Use(); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, screenTexture); s->setInt("uTex", 0); vao->Bind(); glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); vao->Unbind(); s->Unuse(); glClear(GL_DEPTH_BUFFER_BIT); if (lines) { ShaderPtr s = Shader::shaders["line"]; s->Use(); glm::mat4 model(1); glm::mat4 view = c->GetView(); glm::mat4 proj = c->GetProj(); s->setMat4("u_model", model); s->setMat4("u_view", view); s->setMat4("u_proj", proj); lines->Draw(); } } void CameraRaySnapshot() { delete lines; // ok if null (e.g. first instance) std::vector<glm::vec3> poss, dirs, tClrs, bClrs; glm::vec2 imgSize(screenDim); for (int x = 0; x < imgSize.x; x++) { for (int y = 0; y < imgSize.y; y++) { glm::vec2 screenCoord( (2.0f * x) / imgSize.x - 1.0f, (-2.0f * y) / imgSize.y + 1.0f); Ray ray = info.camera.makeRay(screenCoord); poss.push_back(ray.origin); dirs.push_back(ray.direction); } } // debug angle thing //float angle = glm::atan(100); //Ray ray = info.camera.makeRay({ 0, 0 }); //for (int i = 0; i < 1000; i++) //{ // glm::vec3 dir = ray.direction; // // generate random point on unit sphere // float theta = Utils::get_random(0, glm::two_pi<float>()); // range 0 to 2pi // float u = Utils::get_random(-1, 1); // range -1 to 1 // float squ = glm::sqrt(1 - u * u); // avoid computing this twice // glm::vec3 offset; // offset.x = glm::cos(theta) * squ; // offset.y = glm::sin(theta) * squ; // offset.z = u; // // radius of cone, from tan(radius/h)=angle // // x = 1 since this is unit cone // float radius = glm::atan(angle); // dir += (offset * radius); // glm::vec3 pos = ray.origin; // poss.push_back(pos); // dirs.push_back(glm::normalize(dir)); //} for (int i = 0; i < poss.size(); i++) { tClrs.push_back(glm::vec3(1)); bClrs.push_back(glm::vec3(0)); } lines = new LinePool(poss, dirs, tClrs, bClrs); } int& ShadowRays() { return info.numShadowRays; } }
687223d6a76882cf4185578554b1bdb21cb74226.cu
#include "stdafx.h" #include "Voxtrace.h" #include "RayCamera.h" #include "Renderer.h" #include <Engine.h> #include <Pipeline.h> #include <camera.h> #include <Line.h> #include <shader.h> #include <vbo.h> #include <vao.h> #include "CommonDevice.cuh" #include "cuda_gl_interop.h" //#include "pick.h" #include "Randy.h" #include "TraceInfo.h" #include "RTKernel.cuh" namespace Voxels { namespace { LinePool* lines = nullptr; // world description Block* blocks = nullptr; glm::ivec3 chunkDim = { 10, 10, 10 }; int numBlocks = chunkDim.x * chunkDim.y * chunkDim.z; // screen info //glm::vec2 screenDim = { 500, 265 }; //glm::vec2 screenDim = { 1920, 1080 }; // 1080p //glm::vec2 screenDim = { 1280, 720 }; // 720p glm::vec2 screenDim = { 853, 480 }; // 480p //glm::vec2 screenDim = { 125, 65 }; TraceInfo info; float fovDeg = 60.0f; // rendering shiz VBO* vbo = nullptr; VAO* vao = nullptr; GLuint screenTexture = -1; // cuda GL stuff cudaGraphicsResource* imageResource = nullptr; cudaArray* arr = nullptr; const int KernelBlockSize = 256; const int KernelNumBlocks = (screenDim.x * screenDim.y + KernelBlockSize - 1) / KernelBlockSize; curandState_t* states; } void Init() { Engine::PushRenderCallback(Render, 4); InitGLStuff(); InitBlocks(); //InitCUDARand(screenDim.x * screenDim.y); InitCUDARand(states, KernelBlockSize * KernelNumBlocks); info.imgSize = screenDim; info.numShadowRays = 10; auto c = Renderer::GetPipeline()->GetCamera(0); c->GenProjection(fovDeg); } void Shutdown() { ShutdownCUDARands(states); } void InitGLStuff() { // TODO: move this to Vertices.h or something float quadVertices[] = { // positions // texture Coords -1.0f, 1.0f, 0.0f, 0.0f, 1.0f, -1.0f, -1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, -1.0f, 0.0f, 1.0f, 0.0f, }; // setup screen texture pointers vbo = new VBO(&quadVertices[0], sizeof(quadVertices), GL_STATIC_DRAW); VBOlayout layout; layout.Push<float>(3); // pos layout.Push<float>(2); // texcoord vao = new VAO(); vao->AddBuffer(*vbo, layout); // generate screen texture memory glGenTextures(1, &screenTexture); glBindTexture(GL_TEXTURE_2D, screenTexture); // cuda behavior becomes extremely weird when using RGB textures with it glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, screenDim.x, screenDim.y, 0, GL_RGBA, GL_FLOAT, NULL); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); glBindTexture(GL_TEXTURE_2D, 0); // init texture color glm::vec4 defColor(1, .1, .1, 1.); glClearTexImage(screenTexture, 0, GL_RGBA, GL_FLOAT, &defColor[0]); // register the texture as a cuda resource cudaCheck( cudaGraphicsGLRegisterImage(&imageResource, screenTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsSurfaceLoadStore)); } void InitBlocks() { // shared memory so that the GPU and CPU can both read and write blocks cudaCheck(cudaMallocManaged(&blocks, numBlocks * sizeof(Block))); for (int i = 0; i < numBlocks; i++) { blocks[i].alpha = 0; blocks[i].n = 1.5f; auto pos = expand(i, chunkDim.x, chunkDim.y); //if (glm::all(glm::lessThan(pos, { 5, 5, 5 }))) { blocks[i].alpha = rand() % 100 > 80 ? 
1 : 0; if (rand() % 100 > 95) blocks[i].reflect = true; if (rand() % 100 > 90) { blocks[i].refract = true; blocks[i].reflect = false; } //blocks[i].diffuse = { 1, 0, 0 }; } blocks[i].diffuse = Utils::get_random_vec3_r(0, 1); } } void Render() { auto c = Renderer::GetPipeline()->GetCamera(0); info.camera = PerspectiveRayCamera(c->GetPos(), c->GetPos() + c->GetDir(), glm::vec3(0, 1, 0), glm::radians(fovDeg / 2.0f), screenDim.x / screenDim.y); static Camera oldCam = *Renderer::GetPipeline()->GetCamera(0); bool dirtyCam = c->GetDir() != oldCam.GetDir() || c->GetPos() != oldCam.GetPos(); oldCam = *Renderer::GetPipeline()->GetCamera(0); // ray trace her { cudaCheck(cudaGraphicsMapResources(1, &imageResource, 0)); cudaCheck(cudaGraphicsSubResourceGetMappedArray(&arr, imageResource, 0, 0)); cudaCheck(cudaBindSurfaceToArray(GetScreenSurface(), arr)); // passing the info struct in creates crashes when calling info.camera.makeRay epicRayTracer<<<KernelNumBlocks, KernelBlockSize>>>( blocks, chunkDim, info.camera, info.numShadowRays, info.imgSize, chunkDim, *Renderer::Sun(), states, dirtyCam); cudaDeviceSynchronize(); cudaCheck(cudaGraphicsUnmapResources(1, &imageResource, 0)); } // draw fullscreen quad ShaderPtr s = Shader::shaders["fullscreen"]; s->Use(); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, screenTexture); s->setInt("uTex", 0); vao->Bind(); glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); vao->Unbind(); s->Unuse(); glClear(GL_DEPTH_BUFFER_BIT); if (lines) { ShaderPtr s = Shader::shaders["line"]; s->Use(); glm::mat4 model(1); glm::mat4 view = c->GetView(); glm::mat4 proj = c->GetProj(); s->setMat4("u_model", model); s->setMat4("u_view", view); s->setMat4("u_proj", proj); lines->Draw(); } } void CameraRaySnapshot() { delete lines; // ok if null (e.g. first instance) std::vector<glm::vec3> poss, dirs, tClrs, bClrs; glm::vec2 imgSize(screenDim); for (int x = 0; x < imgSize.x; x++) { for (int y = 0; y < imgSize.y; y++) { glm::vec2 screenCoord( (2.0f * x) / imgSize.x - 1.0f, (-2.0f * y) / imgSize.y + 1.0f); Ray ray = info.camera.makeRay(screenCoord); poss.push_back(ray.origin); dirs.push_back(ray.direction); } } // debug angle thing //float angle = glm::atan(100); //Ray ray = info.camera.makeRay({ 0, 0 }); //for (int i = 0; i < 1000; i++) //{ // glm::vec3 dir = ray.direction; // // generate random point on unit sphere // float theta = Utils::get_random(0, glm::two_pi<float>()); // range 0 to 2pi // float u = Utils::get_random(-1, 1); // range -1 to 1 // float squ = glm::sqrt(1 - u * u); // avoid computing this twice // glm::vec3 offset; // offset.x = glm::cos(theta) * squ; // offset.y = glm::sin(theta) * squ; // offset.z = u; // // radius of cone, from tan(radius/h)=angle // // x = 1 since this is unit cone // float radius = glm::atan(angle); // dir += (offset * radius); // glm::vec3 pos = ray.origin; // poss.push_back(pos); // dirs.push_back(glm::normalize(dir)); //} for (int i = 0; i < poss.size(); i++) { tClrs.push_back(glm::vec3(1)); bClrs.push_back(glm::vec3(0)); } lines = new LinePool(poss, dirs, tClrs, bClrs); } int& ShadowRays() { return info.numShadowRays; } }
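The pair above is a direct hipify translation of the same renderer: the HIP version swaps API prefixes (cudaMallocManaged to hipMallocManaged, cudaGraphicsMapResources to hipGraphicsMapResources, curandState_t to hiprandState_t) and rewrites the triple-chevron kernel launch as hipLaunchKernelGGL. As a minimal illustration of just the launch rewrite, the hedged sketch below shows a trivial CUDA launch with, in comments, the HIP form hipify typically emits for it; the kernel and sizes are made up for this example.

#include <cuda_runtime.h>

__global__ void scale_kernel(float* data, float s, int n) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < n) data[i] *= s;                     // simple elementwise scale
}

int main() {
  const int n = 1 << 20;
  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));           // HIP: hipMalloc(&d, n * sizeof(float));

  const int block = 256;
  const int grid = (n + block - 1) / block;

  // CUDA launch with explicit dynamic shared-memory size and stream:
  scale_kernel<<<grid, block, 0, 0>>>(d, 2.0f, n);
  // HIP equivalent produced by hipify (kernel name first, then grid, block,
  // dynamic shared-memory bytes, stream, then the kernel arguments):
  //   hipLaunchKernelGGL(scale_kernel, dim3(grid), dim3(block), 0, 0, d, 2.0f, n);

  cudaDeviceSynchronize();                     // HIP: hipDeviceSynchronize();
  cudaFree(d);                                 // HIP: hipFree(d);
  return 0;
}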
8282028b2cb5b197917ca613b8e4f712e8dad0b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define SIZ 20 #define num_inp 4 using namespace std; typedef struct edge { int first, second; } edges; __global__ void bfs(const edge *edges, int *vertices, int current_depth) { int a = blockDim.x * blockIdx.x + threadIdx.x; int vfirst = edges[a].first; int dfirst = vertices[vfirst]; int vsecond = edges[a].second; int dsecond = vertices[vsecond]; if ((dfirst == current_depth) && (dsecond == -1)) { vertices[vsecond] = dfirst + 1; } if ((dfirst == -1) && (dsecond == current_depth)) { vertices[vfirst] = dsecond + 1; } }
8282028b2cb5b197917ca613b8e4f712e8dad0b4.cu
#include "includes.h" #define SIZ 20 #define num_inp 4 using namespace std; typedef struct edge { int first, second; } edges; __global__ void bfs(const edge *edges, int *vertices, int current_depth) { int a = blockDim.x * blockIdx.x + threadIdx.x; int vfirst = edges[a].first; int dfirst = vertices[vfirst]; int vsecond = edges[a].second; int dsecond = vertices[vsecond]; if ((dfirst == current_depth) && (dsecond == -1)) { vertices[vsecond] = dfirst + 1; } if ((dfirst == -1) && (dsecond == current_depth)) { vertices[vfirst] = dsecond + 1; } }
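The record above contains only the level-expansion kernel; the host loop that drives it is not part of the file. The sketch below is a hypothetical driver, not code from the dataset: it assumes one thread per edge (the kernel has no bounds check on the edge index, so the launch must supply exactly that many threads), re-launches the kernel once per BFS depth, and runs at most V-1 rounds, which is enough for any connected graph with V vertices. The tiny path graph and the names h_edges and d_vertices are invented for illustration, and the struct/kernel are repeated only so the sketch compiles on its own.

#include <cstdio>
#include <cuda_runtime.h>

struct edge { int first, second; };            // mirrors the struct in the record above

// Mirrors the bfs kernel above: one thread per edge, expand the frontier by one level.
__global__ void bfs(const edge* edges, int* vertices, int current_depth) {
  int a = blockDim.x * blockIdx.x + threadIdx.x;
  int vfirst = edges[a].first;   int dfirst = vertices[vfirst];
  int vsecond = edges[a].second; int dsecond = vertices[vsecond];
  if (dfirst == current_depth && dsecond == -1) vertices[vsecond] = dfirst + 1;
  if (dfirst == -1 && dsecond == current_depth) vertices[vfirst] = dsecond + 1;
}

int main() {
  // Hypothetical 5-vertex path graph: 0-1-2-3-4.
  edge h_edges[] = {{0, 1}, {1, 2}, {2, 3}, {3, 4}};
  const int num_edges = 4, num_vertices = 5;
  int h_vertices[num_vertices];
  for (int i = 0; i < num_vertices; ++i) h_vertices[i] = -1;  // -1 means unvisited
  h_vertices[0] = 0;                                          // source vertex at depth 0

  edge* d_edges; int* d_vertices;
  cudaMalloc(&d_edges, sizeof(h_edges));
  cudaMalloc(&d_vertices, sizeof(h_vertices));
  cudaMemcpy(d_edges, h_edges, sizeof(h_edges), cudaMemcpyHostToDevice);
  cudaMemcpy(d_vertices, h_vertices, sizeof(h_vertices), cudaMemcpyHostToDevice);

  // One block suffices here; larger inputs would use (num_edges + 255) / 256 blocks of 256,
  // plus a bounds check in the kernel.
  for (int depth = 0; depth < num_vertices - 1; ++depth) {
    bfs<<<1, num_edges>>>(d_edges, d_vertices, depth);
    cudaDeviceSynchronize();
  }

  cudaMemcpy(h_vertices, d_vertices, sizeof(h_vertices), cudaMemcpyDeviceToHost);
  for (int i = 0; i < num_vertices; ++i) printf("vertex %d depth %d\n", i, h_vertices[i]);
  cudaFree(d_edges); cudaFree(d_vertices);
  return 0;
}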
72a87a6470dd4e4897b5e21201ab75e030b6e31e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ extern "C" { texture<unsigned char, 2> uchar_tex; texture<uchar2, 2> uchar2_tex; texture<unsigned short, 2> ushort_tex; texture<ushort2, 2> ushort2_tex; __global__ void Thumbnail_uchar(int *histogram, int src_width, int src_height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < src_height && x < src_width) { unsigned char pixel = tex2D(uchar_tex, x, y); atomicAdd(&histogram[pixel], 1); } } __global__ void Thumbnail_uchar2(int *histogram, int src_width, int src_height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < src_height && x < src_width) { uchar2 pixel = tex2D(uchar2_tex, x, y); atomicAdd(&histogram[pixel.x], 1); atomicAdd(&histogram[256 + pixel.y], 1); } } __global__ void Thumbnail_ushort(int *histogram, int src_width, int src_height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < src_height && x < src_width) { unsigned short pixel = (tex2D(ushort_tex, x, y) + 128) >> 8; atomicAdd(&histogram[pixel], 1); } } __global__ void Thumbnail_ushort2(int *histogram, int src_width, int src_height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < src_height && x < src_width) { ushort2 pixel = tex2D(ushort2_tex, x, y); atomicAdd(&histogram[(pixel.x + 128) >> 8], 1); atomicAdd(&histogram[256 + (pixel.y + 128) >> 8], 1); } } }
72a87a6470dd4e4897b5e21201ab75e030b6e31e.cu
/* * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ extern "C" { texture<unsigned char, 2> uchar_tex; texture<uchar2, 2> uchar2_tex; texture<unsigned short, 2> ushort_tex; texture<ushort2, 2> ushort2_tex; __global__ void Thumbnail_uchar(int *histogram, int src_width, int src_height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < src_height && x < src_width) { unsigned char pixel = tex2D(uchar_tex, x, y); atomicAdd(&histogram[pixel], 1); } } __global__ void Thumbnail_uchar2(int *histogram, int src_width, int src_height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < src_height && x < src_width) { uchar2 pixel = tex2D(uchar2_tex, x, y); atomicAdd(&histogram[pixel.x], 1); atomicAdd(&histogram[256 + pixel.y], 1); } } __global__ void Thumbnail_ushort(int *histogram, int src_width, int src_height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < src_height && x < src_width) { unsigned short pixel = (tex2D(ushort_tex, x, y) + 128) >> 8; atomicAdd(&histogram[pixel], 1); } } __global__ void Thumbnail_ushort2(int *histogram, int src_width, int src_height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < src_height && x < src_width) { ushort2 pixel = tex2D(ushort2_tex, x, y); atomicAdd(&histogram[(pixel.x + 128) >> 8], 1); atomicAdd(&histogram[256 + (pixel.y + 128) >> 8], 1); } } }
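The thumbnail kernels above read their input through global texture references (texture<unsigned char, 2> and friends), an API that has long been deprecated and is dropped in recent CUDA releases; the host code that binds those references is not part of the record. Purely as a hedged sketch, and not a claim about how the original project binds its textures, the snippet below shows the same kind of 2D read done through a cudaTextureObject_t over pitched memory. The kernel, buffer names, and image size are hypothetical.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical kernel: same histogram idea as Thumbnail_uchar, but reading through
// a texture object passed as an argument instead of a global texture reference.
__global__ void histogram_texobj(cudaTextureObject_t tex, int* histogram,
                                 int src_width, int src_height) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < src_width && y < src_height) {
    unsigned char pixel = tex2D<unsigned char>(tex, x, y);
    atomicAdd(&histogram[pixel], 1);
  }
}

int main() {
  const int width = 64, height = 64;
  size_t pitch = 0;
  unsigned char* d_img = nullptr;
  cudaMallocPitch(&d_img, &pitch, width, height);   // pitched 8-bit single-channel image
  cudaMemset2D(d_img, pitch, 0x7f, width, height);  // fill every pixel with 127

  // Describe the pitched buffer as a 2D texture resource.
  cudaResourceDesc resDesc = {};
  resDesc.resType = cudaResourceTypePitch2D;
  resDesc.res.pitch2D.devPtr = d_img;
  resDesc.res.pitch2D.desc = cudaCreateChannelDesc<unsigned char>();
  resDesc.res.pitch2D.width = width;
  resDesc.res.pitch2D.height = height;
  resDesc.res.pitch2D.pitchInBytes = pitch;

  cudaTextureDesc texDesc = {};
  texDesc.addressMode[0] = cudaAddressModeClamp;
  texDesc.addressMode[1] = cudaAddressModeClamp;
  texDesc.filterMode = cudaFilterModePoint;         // return raw texels, no interpolation
  texDesc.readMode = cudaReadModeElementType;

  cudaTextureObject_t tex = 0;
  cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr);

  int* d_hist = nullptr;
  cudaMalloc(&d_hist, 256 * sizeof(int));
  cudaMemset(d_hist, 0, 256 * sizeof(int));

  dim3 block(16, 16);
  dim3 grid((width + 15) / 16, (height + 15) / 16);
  histogram_texobj<<<grid, block>>>(tex, d_hist, width, height);
  cudaDeviceSynchronize();

  int h_hist[256];
  cudaMemcpy(h_hist, d_hist, sizeof(h_hist), cudaMemcpyDeviceToHost);
  printf("bin 127 = %d (expected %d)\n", h_hist[127], width * height);

  cudaDestroyTextureObject(tex);
  cudaFree(d_img); cudaFree(d_hist);
  return 0;
}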
1f1e33304e7501842b0861e95bc85e6da3047e61.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "initialize.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; hipMalloc(&a, XSIZE*YSIZE); float *oA = NULL; hipMalloc(&oA, XSIZE*YSIZE); float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); float totalSize = XSIZE*YSIZE; int n = XSIZE*YSIZE; int ghosts = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( initialize), dim3(gridBlock),dim3(threadBlock), 0, 0, a,oA,x,totalSize,n,ghosts); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( initialize), dim3(gridBlock),dim3(threadBlock), 0, 0, a,oA,x,totalSize,n,ghosts); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( initialize), dim3(gridBlock),dim3(threadBlock), 0, 0, a,oA,x,totalSize,n,ghosts); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
1f1e33304e7501842b0861e95bc85e6da3047e61.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "initialize.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); float *oA = NULL; cudaMalloc(&oA, XSIZE*YSIZE); float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); float totalSize = XSIZE*YSIZE; int n = XSIZE*YSIZE; int ghosts = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); initialize<<<gridBlock,threadBlock>>>(a,oA,x,totalSize,n,ghosts); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { initialize<<<gridBlock,threadBlock>>>(a,oA,x,totalSize,n,ghosts); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { initialize<<<gridBlock,threadBlock>>>(a,oA,x,totalSize,n,ghosts); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
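The benchmark pair above warms up with 10 launches and then times 1000 launches of the initialize kernel with std::chrono::steady_clock; because kernel launches are asynchronous and there is no synchronize before the second steady_clock::now(), the measured interval largely reflects launch submission rather than kernel execution time. A common alternative is to time on the device with CUDA events. The sketch below is illustrative only (dummy kernel, made-up sizes) and is not a change to the harness in the record.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(float* a, int n) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < n) a[i] = a[i] * 2.0f + 1.0f;   // arbitrary work to time
}

int main() {
  const int n = 1 << 20;
  const int warmup = 10, iters = 1000;
  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));

  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);

  // Warm-up, as in the record, so one-time startup costs stay out of the measurement.
  for (int i = 0; i < warmup; ++i) dummy_kernel<<<grid, block>>>(d, n);
  cudaDeviceSynchronize();

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);                  // enqueued in the same (default) stream as the kernels
  for (int i = 0; i < iters; ++i) dummy_kernel<<<grid, block>>>(d, n);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);              // blocks until every timed kernel has finished

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);  // GPU-side elapsed time in milliseconds
  printf("total %.3f ms, average %.3f us per launch\n", ms, ms * 1000.0f / iters);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d);
  return 0;
}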
3203e72368aac552aff1fffb0ceea7ec4f8cc704.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright [2016] <Contributors> * \file Correation.cu * \brief correlation1D operator * \author Xu Dong */ #include "./correlation1D-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #define ROUND_OFF 50000 #define WARPS_PER_BLOCK 1 #define THREADS_PER_WARP 32 #define correlation1D_CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { // == correlation1D Kernel template <typename Dtype> __global__ void Correlate1DData(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const Dtype *bottom0, const Dtype *bottom1, Dtype *top) { extern __shared__ char patch_data_char[]; Dtype *patch_data = (Dtype *)patch_data_char; // First (upper left) position of kernel upper-left corner in current center position of neighborhood in image 1 int x1 = blockIdx.x*stride1 + max_displacement; int y1 = blockIdx.y*stride1; int item = blockIdx.z; int ch_off = threadIdx.x; // Load 3D patch into shared shared memory for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch; int idxPatchData = ji_off + ch; patch_data[idxPatchData] = bottom0[idx1]; } } } __syncthreads(); __shared__ Dtype sum[WARPS_PER_BLOCK*THREADS_PER_WARP]; // Compute for(int top_channel = 0; top_channel < topchannels; top_channel++) { sum[ch_off] = 0; int s2o = (top_channel % neighborhood_grid_width + x_shift) * stride2; for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int x2 = x1 + s2o; int idxPatchData = ji_off + ch; int idx2 = ((item * bottomheight + y1+j) * bottomwidth + x2+i) * bottomchannels + ch; sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2]; } } } __syncthreads(); if(ch_off == 0) { Dtype total_sum = 0; for(int idx = 0; idx < WARPS_PER_BLOCK*THREADS_PER_WARP; idx++) { total_sum += sum[idx]; } const int sumelems = kernel_size*kernel_size*bottomchannels; const int index = ((top_channel*topheight + blockIdx.y)*topwidth)+blockIdx.x; top[index + item*topcount] = total_sum / (float)sumelems; } } // Aggregate } // == correlation1D Backward Pass Kernel (For data1) template <typename Dtype> __global__ void Correlate1DDataBackward0(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) 
{ CUDA_KERNEL_LOOP(index, nthreads) { int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight; //h-pos //Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 Dtype sum = 0; if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); { for(int o = x_shift; o < x_shift + neighborhood_grid_width; o++) { // Get bottom1 data: int s2o = stride2 * o; int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + (l+s2o)) * bottomchannels + n; Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m,n] // Index offset for topdiff in following loops: int op = (o-x_shift); // index [o,p] int idxopoffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot1tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot0index = ((n * bottomheight) + m) * bottomwidth + (l-pad_size); bottom0diff[bot0index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 1) template <typename Dtype> __global__ void Correlate1DDataBackward1(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { //int l = index % bottomwidth + pad_size; //w-pos //int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos //int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight; //h-pos // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. 
const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; Dtype sum = 0; { for(int o = x_shift; o < x_shift + neighborhood_grid_width; o++) { int s2o = stride2 * o; //Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - 0 - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - 0 - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - 0) / stride1 if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); // Get bottom0 data: int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + (l-s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot0]; // bottom1[l+s2o,m,n] // Index offset for topdiff in following loops: int op = (o-x_shift); // index [o,p] int idxOpOffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot0tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot1index = ((n * bottomheight) + m) * bottomwidth + (l-pad_size); bottom1diff[bot1index + item*bottomcount] = sum / (float)sumelems; } } // == Forward // == Dimension rearrangement Kernel template <typename Dtype> __global__ void blob_rearrange_kernel2(const Dtype* in, Dtype* out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight) { // change shape from [batchsize,channel,y,x] to [batchsize,y,x,channel] int xy = blockIdx.x*blockDim.x + threadIdx.x; if(xy>=widthheight) return; int ch = blockIdx.y; int n = blockIdx.z; float value=in[(n*channels+ch)*widthheight+xy]; __syncthreads(); int xpad = (xy % width + padding); int ypad = (xy / width + 0); int xypad = ypad * (width+2*padding) + xpad; out[(n*pwidthheight+xypad)*channels + ch] = value; } template <typename Dtype> void Forward_gpu( const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data1, const Tensor<gpu, 4, Dtype> &data2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, hipStream_t stream, hipStream_t stream_tmp1, hipStream_t stream_tmp2) { const Dtype *bottom_data1 = data1.dptr_; const Dtype *bottom_data2 = data2.dptr_; Dtype *rbot1 = tmp1.dptr_; Dtype *rbot2 = tmp2.dptr_; Dtype *top = out.dptr_; const int bnum = data1.size(0); const int bchannels = data1.size(1); const int bheight = data1.size(2); const int bwidth = data1.size(3); const int bwidthheight = bwidth * bheight; const int topcount = top_width_ * top_height_ * top_channels_; dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK); int threads_per_block = 16; dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, bnum); 
const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight); hipLaunchKernelGGL(( blob_rearrange_kernel2<Dtype>), dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream_tmp1, bottom_data1, rbot1, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight); hipLaunchKernelGGL(( blob_rearrange_kernel2<Dtype>), dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream_tmp2, bottom_data2, rbot2, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight); const int num = bnum; const int channels = bchannels; const int height = bheight; const int width = bwidth + 2 * pad_size_; const int shared_memory_per_block = (kernel_size_ * kernel_size_) * bchannels; int x_shift = - neighborhood_grid_radius_; if(single_side == -1) { // to the left x_shift = -neighborhood_grid_width_; } else if(single_side == 1) { // to the right x_shift = 0; } // correlation1DLayer int topThreadCount = topcount; dim3 totalBlocksCorr(top_width_, top_height_, num); hipLaunchKernelGGL(( Correlate1DData<Dtype>), dim3(totalBlocksCorr), dim3(threadsPerBlock), shared_memory_per_block * sizeof(Dtype), stream, topThreadCount, num, top_width_, top_height_, top_channels_, topcount, max_displacement_, x_shift, neighborhood_grid_width_, kernel_radius_, kernel_size_, stride1_, stride2_, width, height, channels, rbot1, rbot2, top); correlation1D_CUDA_CHECK(hipPeekAtLastError()); } template <typename Dtype> void Backward_gpu( const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 4, Dtype> &in_grad1, const Tensor<gpu, 4, Dtype> &in_grad2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, hipStream_t stream0, hipStream_t stream1, int num, int channels, int height, int width) { // Get top diff, compute bottom diff const Dtype* top_diff = out_grad.dptr_; Dtype* bottom0_diff = in_grad1.dptr_; Dtype* bottom1_diff = in_grad2.dptr_; const Dtype* rbot1 = tmp1.dptr_; const Dtype* rbot2 = tmp2.dptr_; const int paddedheight = height ; const int paddedwidth = width + 2 * pad_size_; const int bottomcount = channels * height * width; int botThreadCount = bottomcount; const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; // correlation1DLayerBackward int x_shift = - neighborhood_grid_radius_; if (single_side == -1) { // to the left x_shift = -neighborhood_grid_width_; } else if(single_side == 1) { // to the right x_shift = 0; } // == Run kernel Backward 0 dim3 totalBlocksBackward0(width, height, channels * num); // First dim is fastest const int buffer_size_backw0 = \ (static_cast<int>(ceil(static_cast<float>(2 * kernel_radius_)\ / static_cast<float>(stride1_))) + 1) * top_channels_; // == Run kernel Backward 0 for (int n = 0; n < num; n++) { hipLaunchKernelGGL(( Correlate1DDataBackward0<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream0, botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, x_shift, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, bottom0_diff, rbot2, top_diff); correlation1D_CUDA_CHECK(hipPeekAtLastError()); } // == Run kernel Backward 1 for (int n = 0; n < num; n++) { hipLaunchKernelGGL(( Correlate1DDataBackward1<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream1, 
botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, x_shift, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, rbot1, bottom1_diff, top_diff); correlation1D_CUDA_CHECK(hipPeekAtLastError()); } } } // namespace cuda template<typename Dtype> inline void Correlation1DForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data1, const Tensor<gpu, 4, Dtype> &data2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_ ) { hipStream_t stream = Stream<gpu>::GetStream(out.stream_); hipStream_t stream_tmp1 = Stream<gpu>::GetStream(tmp1.stream_); hipStream_t stream_tmp2 = Stream<gpu>::GetStream(tmp2.stream_); cuda::Forward_gpu(out, data1, data2, tmp1, tmp2, top_channels_, top_height_, top_width_, pad_size_, single_side, max_displacement_, kernel_size_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, stream, stream_tmp1, stream_tmp2); } template<typename Dtype> inline void Correlation1DBackward(const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 4, Dtype> &in_grad1, const Tensor<gpu, 4, Dtype> &in_grad2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, int num, int channels, int height, int width ) { hipStream_t stream0 = Stream<gpu>::GetStream(in_grad1.stream_); hipStream_t stream1 = Stream<gpu>::GetStream(in_grad2.stream_); cuda::Backward_gpu(out_grad, in_grad1, in_grad2, tmp1, tmp2, top_channels_, top_height_, top_width_, pad_size_, single_side, max_displacement_, kernel_size_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, stream0, stream1, num, channels, height, width); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(Correlation1DParam param) { return new Correlation1DOp<gpu>(param); } } // namespace op } // namespace mxnet
3203e72368aac552aff1fffb0ceea7ec4f8cc704.cu
/*! * Copyright [2016] <Contributors> * \file Correation.cu * \brief correlation1D operator * \author Xu Dong */ #include "./correlation1D-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #define ROUND_OFF 50000 #define WARPS_PER_BLOCK 1 #define THREADS_PER_WARP 32 #define correlation1D_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { // == correlation1D Kernel template <typename Dtype> __global__ void Correlate1DData(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const Dtype *bottom0, const Dtype *bottom1, Dtype *top) { extern __shared__ char patch_data_char[]; Dtype *patch_data = (Dtype *)patch_data_char; // First (upper left) position of kernel upper-left corner in current center position of neighborhood in image 1 int x1 = blockIdx.x*stride1 + max_displacement; int y1 = blockIdx.y*stride1; int item = blockIdx.z; int ch_off = threadIdx.x; // Load 3D patch into shared shared memory for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch; int idxPatchData = ji_off + ch; patch_data[idxPatchData] = bottom0[idx1]; } } } __syncthreads(); __shared__ Dtype sum[WARPS_PER_BLOCK*THREADS_PER_WARP]; // Compute for(int top_channel = 0; top_channel < topchannels; top_channel++) { sum[ch_off] = 0; int s2o = (top_channel % neighborhood_grid_width + x_shift) * stride2; for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int x2 = x1 + s2o; int idxPatchData = ji_off + ch; int idx2 = ((item * bottomheight + y1+j) * bottomwidth + x2+i) * bottomchannels + ch; sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2]; } } } __syncthreads(); if(ch_off == 0) { Dtype total_sum = 0; for(int idx = 0; idx < WARPS_PER_BLOCK*THREADS_PER_WARP; idx++) { total_sum += sum[idx]; } const int sumelems = kernel_size*kernel_size*bottomchannels; const int index = ((top_channel*topheight + blockIdx.y)*topwidth)+blockIdx.x; top[index + item*topcount] = total_sum / (float)sumelems; } } // Aggregate } // == correlation1D Backward Pass Kernel (For data1) template <typename Dtype> __global__ void Correlate1DDataBackward0(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int n = index % bottomchannels; //channels int 
l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight; //h-pos //Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 Dtype sum = 0; if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); { for(int o = x_shift; o < x_shift + neighborhood_grid_width; o++) { // Get bottom1 data: int s2o = stride2 * o; int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + (l+s2o)) * bottomchannels + n; Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m,n] // Index offset for topdiff in following loops: int op = (o-x_shift); // index [o,p] int idxopoffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot1tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot0index = ((n * bottomheight) + m) * bottomwidth + (l-pad_size); bottom0diff[bot0index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 1) template <typename Dtype> __global__ void Correlate1DDataBackward1(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { //int l = index % bottomwidth + pad_size; //w-pos //int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos //int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight; //h-pos // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. 
const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; Dtype sum = 0; { for(int o = x_shift; o < x_shift + neighborhood_grid_width; o++) { int s2o = stride2 * o; //Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - 0 - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - 0 - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - 0) / stride1 if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); // Get bottom0 data: int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + (l-s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot0]; // bottom1[l+s2o,m,n] // Index offset for topdiff in following loops: int op = (o-x_shift); // index [o,p] int idxOpOffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot0tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot1index = ((n * bottomheight) + m) * bottomwidth + (l-pad_size); bottom1diff[bot1index + item*bottomcount] = sum / (float)sumelems; } } // == Forward // == Dimension rearrangement Kernel template <typename Dtype> __global__ void blob_rearrange_kernel2(const Dtype* in, Dtype* out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight) { // change shape from [batchsize,channel,y,x] to [batchsize,y,x,channel] int xy = blockIdx.x*blockDim.x + threadIdx.x; if(xy>=widthheight) return; int ch = blockIdx.y; int n = blockIdx.z; float value=in[(n*channels+ch)*widthheight+xy]; __syncthreads(); int xpad = (xy % width + padding); int ypad = (xy / width + 0); int xypad = ypad * (width+2*padding) + xpad; out[(n*pwidthheight+xypad)*channels + ch] = value; } template <typename Dtype> void Forward_gpu( const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data1, const Tensor<gpu, 4, Dtype> &data2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, cudaStream_t stream, cudaStream_t stream_tmp1, cudaStream_t stream_tmp2) { const Dtype *bottom_data1 = data1.dptr_; const Dtype *bottom_data2 = data2.dptr_; Dtype *rbot1 = tmp1.dptr_; Dtype *rbot2 = tmp2.dptr_; Dtype *top = out.dptr_; const int bnum = data1.size(0); const int bchannels = data1.size(1); const int bheight = data1.size(2); const int bwidth = data1.size(3); const int bwidthheight = bwidth * bheight; const int topcount = top_width_ * top_height_ * top_channels_; dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK); int threads_per_block = 16; dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, 
bnum); const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight); blob_rearrange_kernel2<Dtype><<<totalBlocksRearr, threads_per_block, 0, stream_tmp1>>> (bottom_data1, rbot1, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight); blob_rearrange_kernel2<Dtype><<<totalBlocksRearr, threads_per_block, 0, stream_tmp2>>> (bottom_data2, rbot2, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight); const int num = bnum; const int channels = bchannels; const int height = bheight; const int width = bwidth + 2 * pad_size_; const int shared_memory_per_block = (kernel_size_ * kernel_size_) * bchannels; int x_shift = - neighborhood_grid_radius_; if(single_side == -1) { // to the left x_shift = -neighborhood_grid_width_; } else if(single_side == 1) { // to the right x_shift = 0; } // correlation1DLayer int topThreadCount = topcount; dim3 totalBlocksCorr(top_width_, top_height_, num); Correlate1DData<Dtype><<<totalBlocksCorr, threadsPerBlock, shared_memory_per_block * sizeof(Dtype), stream>>>( topThreadCount, num, top_width_, top_height_, top_channels_, topcount, max_displacement_, x_shift, neighborhood_grid_width_, kernel_radius_, kernel_size_, stride1_, stride2_, width, height, channels, rbot1, rbot2, top); correlation1D_CUDA_CHECK(cudaPeekAtLastError()); } template <typename Dtype> void Backward_gpu( const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 4, Dtype> &in_grad1, const Tensor<gpu, 4, Dtype> &in_grad2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, cudaStream_t stream0, cudaStream_t stream1, int num, int channels, int height, int width) { // Get top diff, compute bottom diff const Dtype* top_diff = out_grad.dptr_; Dtype* bottom0_diff = in_grad1.dptr_; Dtype* bottom1_diff = in_grad2.dptr_; const Dtype* rbot1 = tmp1.dptr_; const Dtype* rbot2 = tmp2.dptr_; const int paddedheight = height ; const int paddedwidth = width + 2 * pad_size_; const int bottomcount = channels * height * width; int botThreadCount = bottomcount; const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; // correlation1DLayerBackward int x_shift = - neighborhood_grid_radius_; if (single_side == -1) { // to the left x_shift = -neighborhood_grid_width_; } else if(single_side == 1) { // to the right x_shift = 0; } // == Run kernel Backward 0 dim3 totalBlocksBackward0(width, height, channels * num); // First dim is fastest const int buffer_size_backw0 = \ (static_cast<int>(ceil(static_cast<float>(2 * kernel_radius_)\ / static_cast<float>(stride1_))) + 1) * top_channels_; // == Run kernel Backward 0 for (int n = 0; n < num; n++) { Correlate1DDataBackward0<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream0>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, x_shift, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, bottom0_diff, rbot2, top_diff); correlation1D_CUDA_CHECK(cudaPeekAtLastError()); } // == Run kernel Backward 1 for (int n = 0; n < num; n++) { Correlate1DDataBackward1<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream1>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, x_shift, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, 
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, rbot1, bottom1_diff, top_diff); correlation1D_CUDA_CHECK(cudaPeekAtLastError()); } } } // namespace cuda template<typename Dtype> inline void Correlation1DForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data1, const Tensor<gpu, 4, Dtype> &data2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_ ) { cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); cudaStream_t stream_tmp1 = Stream<gpu>::GetStream(tmp1.stream_); cudaStream_t stream_tmp2 = Stream<gpu>::GetStream(tmp2.stream_); cuda::Forward_gpu(out, data1, data2, tmp1, tmp2, top_channels_, top_height_, top_width_, pad_size_, single_side, max_displacement_, kernel_size_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, stream, stream_tmp1, stream_tmp2); } template<typename Dtype> inline void Correlation1DBackward(const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 4, Dtype> &in_grad1, const Tensor<gpu, 4, Dtype> &in_grad2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, int num, int channels, int height, int width ) { cudaStream_t stream0 = Stream<gpu>::GetStream(in_grad1.stream_); cudaStream_t stream1 = Stream<gpu>::GetStream(in_grad2.stream_); cuda::Backward_gpu(out_grad, in_grad1, in_grad2, tmp1, tmp2, top_channels_, top_height_, top_width_, pad_size_, single_side, max_displacement_, kernel_size_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, stream0, stream1, num, channels, height, width); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(Correlation1DParam param) { return new Correlation1DOp<gpu>(param); } } // namespace op } // namespace mxnet
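Both backward kernels in the correlation1D pair iterate with the CUDA_KERNEL_LOOP macro, a grid-stride loop: each thread starts at its global index and advances by blockDim.x * gridDim.x, so a launch can cover more elements than it has threads. In the file the grid is sized to cover bottomcount, so each thread usually runs a single iteration, but the pattern keeps the kernel correct for any grid size. The sketch below is a minimal standalone illustration of that pattern with hypothetical names; it is not code from the operator.

#include <cstdio>
#include <cuda_runtime.h>

// Same idea as CUDA_KERNEL_LOOP(i, n) in the file above: a grid-stride loop.
#define KERNEL_LOOP(i, n)                                      \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
       i += blockDim.x * gridDim.x)

__global__ void saxpy_strided(int n, float a, const float* x, float* y) {
  KERNEL_LOOP(i, n) {                 // each thread handles i, i + stride, i + 2*stride, ...
    y[i] = a * x[i] + y[i];
  }
}

int main() {
  const int n = 1 << 22;              // more elements than threads launched
  float *dx, *dy;
  cudaMalloc(&dx, n * sizeof(float));
  cudaMalloc(&dy, n * sizeof(float));
  cudaMemset(dx, 0, n * sizeof(float));
  cudaMemset(dy, 0, n * sizeof(float));

  const int threads = 256;
  const int blocks = 1024;            // fixed grid, independent of n
  saxpy_strided<<<blocks, threads>>>(n, 2.0f, dx, dy);
  cudaDeviceSynchronize();

  printf("launched %d threads for %d elements\n", blocks * threads, n);
  cudaFree(dx); cudaFree(dy);
  return 0;
}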
34c2d3961f19604ae6a4a3ffa3229c25d53852ef.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

//int main()
//{
//    int iDev = 0;
//    hipDeviceProp_t iProp;
//
//    hipGetDeviceProperties(&iProp, iDev);
//    printf("Max threads per SM : %d \n",
//        iProp.maxThreadsPerMultiProcessor);
//
//    return 0;
//}
34c2d3961f19604ae6a4a3ffa3229c25d53852ef.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> //int main() //{ // int iDev = 0; // cudaDeviceProp iProp; // // cudaGetDeviceProperties(&iProp, iDev); // printf("Max threads per SM : %d \n", // iProp.maxThreadsPerMultiProcessor); // // return 0; //}
1169ff270df91274aff2bdfd729bef130a91dccf.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020 NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <common/cumlHandle.hpp> #include <common/device_buffer.hpp> #include <cuda_utils.cuh> #include <cuml/common/cuml_allocator.hpp> #include <cuml/linear_model/preprocess_mg.hpp> #include <cuml/solvers/cd_mg.hpp> #include <functions/softThres.cuh> #include <linalg/add.cuh> #include <linalg/eltwise.cuh> #include <linalg/gemm.cuh> #include <linalg/multiply.cuh> #include <linalg/subtract.cuh> #include <matrix/math.cuh> #include <matrix/matrix.cuh> #include <opg/linalg/mv_aTb.hpp> #include <opg/linalg/norm.hpp> #include <raft/comms/comms.hpp> #include "shuffle.h" using namespace MLCommon; namespace ML { namespace CD { namespace opg { template <typename T> void fit_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<T> *> &labels, T *coef, T *intercept, bool fit_intercept, bool normalize, int epochs, T alpha, T l1_ratio, bool shuffle, T tol, hipStream_t *streams, int n_streams, bool verbose) { const auto &comm = handle.get_comms(); hipblasHandle_t cublas_handle = handle.get_cublas_handle(); const auto allocator = handle.get_device_allocator(); std::vector<Matrix::RankSizePair *> partsToRanks = input_desc.blocksOwnedBy(comm.get_rank()); size_t total_M = 0.0; for (int i = 0; i < partsToRanks.size(); i++) { total_M += partsToRanks[i]->size; } device_buffer<T> pred(allocator, streams[0], total_M); device_buffer<T> residual(allocator, streams[0], total_M); device_buffer<T> squared(allocator, streams[0], input_desc.N); device_buffer<T> mu_input(allocator, streams[0]); device_buffer<T> norm2_input(allocator, streams[0]); device_buffer<T> mu_labels(allocator, streams[0]); std::vector<T> h_coef(input_desc.N, T(0)); if (fit_intercept) { mu_input.resize(input_desc.N, streams[0]); mu_labels.resize(1, streams[0]); if (normalize) { norm2_input.resize(input_desc.N, streams[0]); } GLM::opg::preProcessData(handle, input_data, input_desc, labels, mu_input.data(), mu_labels.data(), norm2_input.data(), fit_intercept, normalize, streams, n_streams, verbose); } std::vector<int> ri(input_desc.N); std::mt19937 g(rand()); size_t memsize = input_desc.N * sizeof(int); int *ri_h = (int *)malloc(memsize); CUDA_CHECK(hipHostRegister(ri_h, memsize, hipHostRegisterDefault)); if (comm.get_rank() == 0) { ML::Solver::initShuffle(ri, g); for (int i = 0; i < input_desc.N; i++) { ri_h[i] = ri[i]; } } comm.bcast(ri_h, input_desc.N, 0, streams[0]); comm.sync_stream(streams[0]); T l2_alpha = (1 - l1_ratio) * alpha * input_desc.M; alpha = l1_ratio * alpha * input_desc.M; if (normalize) { T scalar = T(1.0) + l2_alpha; raft::matrix::setValue(squared.data(), squared.data(), scalar, input_desc.N, streams[0]); } else { Matrix::Data<T> squared_data{squared.data(), size_t(input_desc.N)}; LinAlg::opg::colNorm2NoSeq(squared_data, input_data, input_desc, comm, allocator, streams, n_streams, 
cublas_handle); LinAlg::addScalar(squared.data(), squared.data(), l2_alpha, input_desc.N, streams[0]); } std::vector<Matrix::Data<T> *> input_data_temp; Matrix::PartDescriptor input_desc_temp = input_desc; input_desc_temp.N = size_t(1); std::vector<Matrix::Data<T> *> residual_temp; Matrix::Data<T> coef_loc_data; T *rs = residual.data(); for (int i = 0; i < partsToRanks.size(); i++) { raft::copy(rs, labels[i]->ptr, partsToRanks[i]->size, streams[0]); Matrix::Data<T> *rs_data = new Matrix::Data<T>(); rs_data->ptr = rs; rs_data->totalSize = partsToRanks[i]->size; residual_temp.push_back(rs_data); Matrix::Data<T> *temp_data = new Matrix::Data<T>(); temp_data->totalSize = partsToRanks[i]->size; input_data_temp.push_back(temp_data); rs += partsToRanks[i]->size; } for (int i = 0; i < epochs; i++) { if (i > 0 && shuffle) { if (comm.get_rank() == 0) { Solver::shuffle(ri, g); for (int k = 0; k < input_desc.N; k++) { ri_h[k] = ri[k]; } } comm.bcast(ri_h, input_desc.N, 0, streams[0]); comm.sync_stream(streams[0]); } T coef_max = 0.0; T d_coef_max = 0.0; T coef_prev = 0.0; for (int j = 0; j < input_desc.N; j++) { int ci = ri_h[j]; T *coef_loc = coef + ci; T *squared_loc = squared.data() + ci; T *input_col_loc; T *pred_loc = pred.data(); T *residual_loc = residual.data(); for (int k = 0; k < input_data.size(); k++) { input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size); input_data_temp[k]->ptr = input_col_loc; input_data_temp[k]->totalSize = partsToRanks[k]->size; LinAlg::multiplyScalar(pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]); LinAlg::add(residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]); pred_loc = pred_loc + partsToRanks[k]->size; residual_loc = residual_loc + partsToRanks[k]->size; } for (int k = 0; k < n_streams; k++) { CUDA_CHECK(hipStreamSynchronize(streams[k])); } coef_loc_data.ptr = coef_loc; coef_loc_data.totalSize = size_t(1); LinAlg::opg::mv_aTb(coef_loc_data, input_data_temp, input_desc_temp, residual_temp, comm, allocator, streams, n_streams, cublas_handle); if (l1_ratio > T(0.0)) Functions::softThres(coef_loc, coef_loc, alpha, 1, streams[0]); raft::linalg::eltwiseDivideCheckZero(coef_loc, coef_loc, squared_loc, 1, streams[0]); coef_prev = h_coef[ci]; raft::update_host(&(h_coef[ci]), coef_loc, 1, streams[0]); CUDA_CHECK(hipStreamSynchronize(streams[0])); T diff = abs(coef_prev - h_coef[ci]); if (diff > d_coef_max) d_coef_max = diff; if (abs(h_coef[ci]) > coef_max) coef_max = abs(h_coef[ci]); pred_loc = pred.data(); residual_loc = residual.data(); for (int k = 0; k < input_data.size(); k++) { input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size); LinAlg::multiplyScalar(pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]); LinAlg::subtract(residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]); pred_loc = pred_loc + partsToRanks[k]->size; residual_loc = residual_loc + partsToRanks[k]->size; } for (int k = 0; k < n_streams; k++) { CUDA_CHECK(hipStreamSynchronize(streams[k])); } } bool flag_continue = true; if (coef_max == T(0)) { flag_continue = false; } if ((d_coef_max / coef_max) < tol) { flag_continue = false; } if (!flag_continue) { break; } } CUDA_CHECK(hipHostUnregister(ri_h)); free(ri_h); for (int i = 0; i < partsToRanks.size(); i++) { delete residual_temp[i]; delete input_data_temp[i]; } if (fit_intercept) { GLM::opg::postProcessData(handle, input_data, input_desc, labels, coef, intercept, mu_input.data(), mu_labels.data(), 
norm2_input.data(), fit_intercept, normalize, streams, n_streams, verbose); } else { *intercept = T(0); } } /** * @brief performs MNMG fit operation for the ols * @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param input: input data * @input param labels: labels data * @output param coef: learned regression coefficients * @output param intercept: intercept value * @input param fit_intercept: fit intercept or not * @input param normalize: normalize the data or not * @input param verbose */ template <typename T> void fit_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<T> *> &labels, T *coef, T *intercept, bool fit_intercept, bool normalize, int epochs, T alpha, T l1_ratio, bool shuffle, T tol, bool verbose) { int rank = handle.get_comms().get_rank(); // TODO: These streams should come from raft::handle_t // Tracking issue: https://github.com/rapidsai/cuml/issues/2470 int n_streams = input_desc.blocksOwnedBy(rank).size(); ; hipStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamCreate(&streams[i])); } fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamDestroy(streams[i])); } } template <typename T> void predict_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, T *coef, T intercept, std::vector<Matrix::Data<T> *> &preds, hipStream_t *streams, int n_streams, bool verbose) { std::vector<Matrix::RankSizePair *> local_blocks = input_desc.partsToRanks; T alpha = T(1); T beta = T(0); for (int i = 0; i < input_data.size(); i++) { int si = i % n_streams; LinAlg::gemm(input_data[i]->ptr, local_blocks[i]->size, input_desc.N, coef, preds[i]->ptr, local_blocks[i]->size, size_t(1), HIPBLAS_OP_N, HIPBLAS_OP_N, alpha, beta, handle.get_cublas_handle(), streams[si]); LinAlg::addScalar(preds[i]->ptr, preds[i]->ptr, intercept, local_blocks[i]->size, streams[si]); } } template <typename T> void predict_impl(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<T> **input, size_t n_rows, size_t n_cols, T *coef, T intercept, Matrix::Data<T> **preds, bool verbose) { int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes, rank_sizes + n_parts); std::vector<Matrix::Data<T> *> input_data(input, input + n_parts); Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank); std::vector<Matrix::Data<T> *> preds_data(preds, preds + n_parts); // TODO: These streams should come from raft::handle_t // Tracking issue: https://github.com/rapidsai/cuml/issues/2470 int n_streams = n_parts; hipStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamCreate(&streams[i])); } predict_impl(handle, input_data, input_desc, coef, intercept, preds_data, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamDestroy(streams[i])); } } void fit(raft::handle_t &handle, std::vector<Matrix::Data<float> *> &input_data, 
Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<float> *> &labels, float *coef, float *intercept, bool fit_intercept, bool normalize, int epochs, float alpha, float l1_ratio, bool shuffle, float tol, bool verbose) { fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, verbose); } void fit(raft::handle_t &handle, std::vector<Matrix::Data<double> *> &input_data, Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<double> *> &labels, double *coef, double *intercept, bool fit_intercept, bool normalize, int epochs, double alpha, double l1_ratio, bool shuffle, double tol, bool verbose) { fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, verbose); } void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<float> **input, size_t n_rows, size_t n_cols, float *coef, float intercept, Matrix::Data<float> **preds, bool verbose) { predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose); } void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<double> **input, size_t n_rows, size_t n_cols, double *coef, double intercept, Matrix::Data<double> **preds, bool verbose) { predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose); } } // namespace opg } // namespace CD } // namespace ML
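fit_impl above registers a plain malloc'd host buffer with hipHostRegister (cudaHostRegister in the .cu twin) before broadcasting the shuffled index order, and unregisters it before free. Below is a minimal sketch of that register/use/unregister lifecycle, applied here to an ordinary asynchronous host-to-device copy rather than a communicator; all buffer names and sizes are assumed for illustration.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

int main() {
  const size_t n = 1 << 16;
  const size_t bytes = n * sizeof(int);

  // Ordinary malloc'd memory, then pinned in place so async copies can use it.
  int* h_buf = static_cast<int*>(malloc(bytes));
  for (size_t i = 0; i < n; ++i) h_buf[i] = static_cast<int>(i);
  cudaHostRegister(h_buf, bytes, cudaHostRegisterDefault);

  int* d_buf = nullptr;
  cudaStream_t stream;
  cudaStreamCreate(&stream);
  cudaMalloc(&d_buf, bytes);
  cudaMemcpyAsync(d_buf, h_buf, bytes, cudaMemcpyHostToDevice, stream);
  cudaStreamSynchronize(stream);

  // Unregister before freeing, mirroring cudaHostUnregister(ri_h); free(ri_h) above.
  cudaHostUnregister(h_buf);
  free(h_buf);
  cudaFree(d_buf);
  cudaStreamDestroy(stream);
  printf("copied %zu ints via a registered (pinned) host buffer\n", n);
  return 0;
}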
1169ff270df91274aff2bdfd729bef130a91dccf.cu
/* * Copyright (c) 2020 NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <common/cumlHandle.hpp> #include <common/device_buffer.hpp> #include <cuda_utils.cuh> #include <cuml/common/cuml_allocator.hpp> #include <cuml/linear_model/preprocess_mg.hpp> #include <cuml/solvers/cd_mg.hpp> #include <functions/softThres.cuh> #include <linalg/add.cuh> #include <linalg/eltwise.cuh> #include <linalg/gemm.cuh> #include <linalg/multiply.cuh> #include <linalg/subtract.cuh> #include <matrix/math.cuh> #include <matrix/matrix.cuh> #include <opg/linalg/mv_aTb.hpp> #include <opg/linalg/norm.hpp> #include <raft/comms/comms.hpp> #include "shuffle.h" using namespace MLCommon; namespace ML { namespace CD { namespace opg { template <typename T> void fit_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<T> *> &labels, T *coef, T *intercept, bool fit_intercept, bool normalize, int epochs, T alpha, T l1_ratio, bool shuffle, T tol, cudaStream_t *streams, int n_streams, bool verbose) { const auto &comm = handle.get_comms(); cublasHandle_t cublas_handle = handle.get_cublas_handle(); const auto allocator = handle.get_device_allocator(); std::vector<Matrix::RankSizePair *> partsToRanks = input_desc.blocksOwnedBy(comm.get_rank()); size_t total_M = 0.0; for (int i = 0; i < partsToRanks.size(); i++) { total_M += partsToRanks[i]->size; } device_buffer<T> pred(allocator, streams[0], total_M); device_buffer<T> residual(allocator, streams[0], total_M); device_buffer<T> squared(allocator, streams[0], input_desc.N); device_buffer<T> mu_input(allocator, streams[0]); device_buffer<T> norm2_input(allocator, streams[0]); device_buffer<T> mu_labels(allocator, streams[0]); std::vector<T> h_coef(input_desc.N, T(0)); if (fit_intercept) { mu_input.resize(input_desc.N, streams[0]); mu_labels.resize(1, streams[0]); if (normalize) { norm2_input.resize(input_desc.N, streams[0]); } GLM::opg::preProcessData(handle, input_data, input_desc, labels, mu_input.data(), mu_labels.data(), norm2_input.data(), fit_intercept, normalize, streams, n_streams, verbose); } std::vector<int> ri(input_desc.N); std::mt19937 g(rand()); size_t memsize = input_desc.N * sizeof(int); int *ri_h = (int *)malloc(memsize); CUDA_CHECK(cudaHostRegister(ri_h, memsize, cudaHostRegisterDefault)); if (comm.get_rank() == 0) { ML::Solver::initShuffle(ri, g); for (int i = 0; i < input_desc.N; i++) { ri_h[i] = ri[i]; } } comm.bcast(ri_h, input_desc.N, 0, streams[0]); comm.sync_stream(streams[0]); T l2_alpha = (1 - l1_ratio) * alpha * input_desc.M; alpha = l1_ratio * alpha * input_desc.M; if (normalize) { T scalar = T(1.0) + l2_alpha; raft::matrix::setValue(squared.data(), squared.data(), scalar, input_desc.N, streams[0]); } else { Matrix::Data<T> squared_data{squared.data(), size_t(input_desc.N)}; LinAlg::opg::colNorm2NoSeq(squared_data, input_data, input_desc, comm, allocator, streams, n_streams, cublas_handle); LinAlg::addScalar(squared.data(), 
squared.data(), l2_alpha, input_desc.N, streams[0]); } std::vector<Matrix::Data<T> *> input_data_temp; Matrix::PartDescriptor input_desc_temp = input_desc; input_desc_temp.N = size_t(1); std::vector<Matrix::Data<T> *> residual_temp; Matrix::Data<T> coef_loc_data; T *rs = residual.data(); for (int i = 0; i < partsToRanks.size(); i++) { raft::copy(rs, labels[i]->ptr, partsToRanks[i]->size, streams[0]); Matrix::Data<T> *rs_data = new Matrix::Data<T>(); rs_data->ptr = rs; rs_data->totalSize = partsToRanks[i]->size; residual_temp.push_back(rs_data); Matrix::Data<T> *temp_data = new Matrix::Data<T>(); temp_data->totalSize = partsToRanks[i]->size; input_data_temp.push_back(temp_data); rs += partsToRanks[i]->size; } for (int i = 0; i < epochs; i++) { if (i > 0 && shuffle) { if (comm.get_rank() == 0) { Solver::shuffle(ri, g); for (int k = 0; k < input_desc.N; k++) { ri_h[k] = ri[k]; } } comm.bcast(ri_h, input_desc.N, 0, streams[0]); comm.sync_stream(streams[0]); } T coef_max = 0.0; T d_coef_max = 0.0; T coef_prev = 0.0; for (int j = 0; j < input_desc.N; j++) { int ci = ri_h[j]; T *coef_loc = coef + ci; T *squared_loc = squared.data() + ci; T *input_col_loc; T *pred_loc = pred.data(); T *residual_loc = residual.data(); for (int k = 0; k < input_data.size(); k++) { input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size); input_data_temp[k]->ptr = input_col_loc; input_data_temp[k]->totalSize = partsToRanks[k]->size; LinAlg::multiplyScalar(pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]); LinAlg::add(residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]); pred_loc = pred_loc + partsToRanks[k]->size; residual_loc = residual_loc + partsToRanks[k]->size; } for (int k = 0; k < n_streams; k++) { CUDA_CHECK(cudaStreamSynchronize(streams[k])); } coef_loc_data.ptr = coef_loc; coef_loc_data.totalSize = size_t(1); LinAlg::opg::mv_aTb(coef_loc_data, input_data_temp, input_desc_temp, residual_temp, comm, allocator, streams, n_streams, cublas_handle); if (l1_ratio > T(0.0)) Functions::softThres(coef_loc, coef_loc, alpha, 1, streams[0]); raft::linalg::eltwiseDivideCheckZero(coef_loc, coef_loc, squared_loc, 1, streams[0]); coef_prev = h_coef[ci]; raft::update_host(&(h_coef[ci]), coef_loc, 1, streams[0]); CUDA_CHECK(cudaStreamSynchronize(streams[0])); T diff = abs(coef_prev - h_coef[ci]); if (diff > d_coef_max) d_coef_max = diff; if (abs(h_coef[ci]) > coef_max) coef_max = abs(h_coef[ci]); pred_loc = pred.data(); residual_loc = residual.data(); for (int k = 0; k < input_data.size(); k++) { input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size); LinAlg::multiplyScalar(pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]); LinAlg::subtract(residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]); pred_loc = pred_loc + partsToRanks[k]->size; residual_loc = residual_loc + partsToRanks[k]->size; } for (int k = 0; k < n_streams; k++) { CUDA_CHECK(cudaStreamSynchronize(streams[k])); } } bool flag_continue = true; if (coef_max == T(0)) { flag_continue = false; } if ((d_coef_max / coef_max) < tol) { flag_continue = false; } if (!flag_continue) { break; } } CUDA_CHECK(cudaHostUnregister(ri_h)); free(ri_h); for (int i = 0; i < partsToRanks.size(); i++) { delete residual_temp[i]; delete input_data_temp[i]; } if (fit_intercept) { GLM::opg::postProcessData(handle, input_data, input_desc, labels, coef, intercept, mu_input.data(), mu_labels.data(), norm2_input.data(), fit_intercept, normalize, 
streams, n_streams, verbose); } else { *intercept = T(0); } } /** * @brief performs MNMG fit operation for the ols * @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param input: input data * @input param labels: labels data * @output param coef: learned regression coefficients * @output param intercept: intercept value * @input param fit_intercept: fit intercept or not * @input param normalize: normalize the data or not * @input param verbose */ template <typename T> void fit_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<T> *> &labels, T *coef, T *intercept, bool fit_intercept, bool normalize, int epochs, T alpha, T l1_ratio, bool shuffle, T tol, bool verbose) { int rank = handle.get_comms().get_rank(); // TODO: These streams should come from raft::handle_t // Tracking issue: https://github.com/rapidsai/cuml/issues/2470 int n_streams = input_desc.blocksOwnedBy(rank).size(); ; cudaStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamCreate(&streams[i])); } fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamDestroy(streams[i])); } } template <typename T> void predict_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, T *coef, T intercept, std::vector<Matrix::Data<T> *> &preds, cudaStream_t *streams, int n_streams, bool verbose) { std::vector<Matrix::RankSizePair *> local_blocks = input_desc.partsToRanks; T alpha = T(1); T beta = T(0); for (int i = 0; i < input_data.size(); i++) { int si = i % n_streams; LinAlg::gemm(input_data[i]->ptr, local_blocks[i]->size, input_desc.N, coef, preds[i]->ptr, local_blocks[i]->size, size_t(1), CUBLAS_OP_N, CUBLAS_OP_N, alpha, beta, handle.get_cublas_handle(), streams[si]); LinAlg::addScalar(preds[i]->ptr, preds[i]->ptr, intercept, local_blocks[i]->size, streams[si]); } } template <typename T> void predict_impl(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<T> **input, size_t n_rows, size_t n_cols, T *coef, T intercept, Matrix::Data<T> **preds, bool verbose) { int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes, rank_sizes + n_parts); std::vector<Matrix::Data<T> *> input_data(input, input + n_parts); Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank); std::vector<Matrix::Data<T> *> preds_data(preds, preds + n_parts); // TODO: These streams should come from raft::handle_t // Tracking issue: https://github.com/rapidsai/cuml/issues/2470 int n_streams = n_parts; cudaStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamCreate(&streams[i])); } predict_impl(handle, input_data, input_desc, coef, intercept, preds_data, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamDestroy(streams[i])); } } void fit(raft::handle_t &handle, std::vector<Matrix::Data<float> *> &input_data, Matrix::PartDescriptor &input_desc, 
std::vector<Matrix::Data<float> *> &labels, float *coef, float *intercept, bool fit_intercept, bool normalize, int epochs, float alpha, float l1_ratio, bool shuffle, float tol, bool verbose) { fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, verbose); } void fit(raft::handle_t &handle, std::vector<Matrix::Data<double> *> &input_data, Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<double> *> &labels, double *coef, double *intercept, bool fit_intercept, bool normalize, int epochs, double alpha, double l1_ratio, bool shuffle, double tol, bool verbose) { fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, verbose); } void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<float> **input, size_t n_rows, size_t n_cols, float *coef, float intercept, Matrix::Data<float> **preds, bool verbose) { predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose); } void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<double> **input, size_t n_rows, size_t n_cols, double *coef, double intercept, Matrix::Data<double> **preds, bool verbose) { predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose); } } // namespace opg } // namespace CD } // namespace ML
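The non-stream overloads of fit_impl and predict_impl above create one stream per partition, dispatch work round-robin with streams[k % n_streams], synchronize them all, and then destroy them. Below is a compact sketch of that create/dispatch/sync/destroy lifecycle under assumed sizes; add_one and the per-stream buffers are illustrative and are not cuML code.

#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void add_one(int n, float* x) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] += 1.0f;
}

int main() {
  const int n_streams = 4;   // stand-in for one stream per owned partition
  const int n = 1 << 20;
  std::vector<cudaStream_t> streams(n_streams);
  std::vector<float*> bufs(n_streams);

  for (int i = 0; i < n_streams; ++i) {
    cudaStreamCreate(&streams[i]);
    cudaMalloc(&bufs[i], n * sizeof(float));
    cudaMemsetAsync(bufs[i], 0, n * sizeof(float), streams[i]);
  }

  // Independent work per stream, mirroring the streams[k % n_streams] dispatch above.
  for (int i = 0; i < n_streams; ++i)
    add_one<<<(n + 255) / 256, 256, 0, streams[i]>>>(n, bufs[i]);

  // Synchronize everything before tearing the streams down, as the wrappers do.
  for (int i = 0; i < n_streams; ++i) cudaStreamSynchronize(streams[i]);
  for (int i = 0; i < n_streams; ++i) {
    cudaFree(bufs[i]);
    cudaStreamDestroy(streams[i]);
  }
  printf("ran %d independent streams\n", n_streams);
  return 0;
}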
c2639656ad192a2f1855a0b5ed366bf08ceaba6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/NativeFunctions.h> #include <ATen/native/sparse/SparseUtils.h> #include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <THH/THHThrustAllocator.cuh> #include <THH/THHTensorSort.cuh> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/gather.h> #include <thrust/generate.h> #include <thrust/scan.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/unique.h> #if TORCH_HIP_VERSION >= 7000 #include <thrust/system/hip/execution_policy.h> #endif namespace at { namespace native { SparseTensor coalesce_sparse_cuda(const SparseTensor& self) { #ifndef __HIP_PLATFORM_HCC__ int64_t nnz = self._nnz(); if (nnz < 2) { _get_sparse_impl(self)->set_coalesced(true); } if (self.is_coalesced()) { return self; } hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Replace instances with // For indices, a simple sort + unique suffices // For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection). // TODO: I'm not sure if this could ever be non-contiguous LongTensor values = self._values().contiguous(); int64_t sparseDims = self._sparseDims(); int64_t stride = values.stride(0); // indices will be modified by Thrust, so we have to clone or use new storage // here. LongTensor indices1D = _newFlattenedIndices(self, true); LongTensor origIndices = at::empty({nnz}, self._indices().options()); LongTensor uniqueOffsets = at::empty({nnz}, self._indices().options()); typedef thrust::device_ptr<int64_t> thrust_ptr; thrust_ptr indicesIter(indices1D.data<int64_t>()); thrust_ptr origIndicesIter(origIndices.data<int64_t>()); thrust_ptr uniqueOffsetsIter(uniqueOffsets.data<int64_t>()); // Fill sortedOrigIndices with sequential indices thrust::counting_iterator<int64_t> countIterI(TH_INDEX_BASE); thrust::counting_iterator<int64_t> countIterO(TH_INDEX_BASE); thrust::copy(policy, countIterI, countIterI + nnz, origIndicesIter); thrust::copy(policy, countIterO, countIterO + nnz, uniqueOffsetsIter); thrust::sort_by_key(policy, indicesIter, indicesIter + nnz, origIndicesIter, ThrustLTOp<int64_t>() ); // this forces device-host synchronization! 
thrust::pair<thrust_ptr, thrust_ptr> newEnd = thrust::unique_by_key(policy, indicesIter, indicesIter + nnz, uniqueOffsetsIter ); int64_t newNnz = newEnd.first - indicesIter; indices1D.resize_({1, newNnz}); std::vector<int64_t> newValues_size(values.sizes()); newValues_size[0] = newNnz; Tensor newValues = at::empty(newValues_size, values.options()); dim3 grid(THCCeilDiv(newNnz, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128)); dim3 block(32, 4); AT_DISPATCH_ALL_TYPES_AND_HALF( values.type(), "coalesce_sparse_cuda", [&] { using cuda_accscalar_t = acc_type<scalar_t, /* is_cuda */ true>; hipLaunchKernelGGL(( apply::coalesceValuesKernel<scalar_t, cuda_accscalar_t>), dim3(grid), dim3(block), 0, stream, uniqueOffsets.data<int64_t>(), origIndices.data<int64_t>(), values.data<scalar_t>(), newValues.data<scalar_t>(), nnz, newNnz, stride ); }); // this grid-strided version is slower but probably more flexible // to different sizes // int64_t blockX = min(stride, (int64_t) 512); // dim3 block(blockX, 512 / blockX); // int64_t grid = min((int64_t) 1024, THCCeilDiv((int64_t) newNnz * stride, (int64_t) block.x * block.y)); // THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream>>>( // THCIndexTensor_(data)(state, uniqueOffsets), // THCIndexTensor_(data)(state, origIndices), // THCTensor_(data)(state, values), // THCTensor_(data)(state, newValues), // nnz, // newNnz, // stride // ); //////////////////////////////////////////////////////////// // unflatten indices if necessary LongTensor newIndices; if (sparseDims == 1) { newIndices = indices1D; } else { newIndices = at::empty({sparseDims, newNnz}, origIndices.options()); if (TH_INDEX_BASE != 0) { indices1D.add_(-1); } for (int64_t d = sparseDims - 1; d >= 0; d--) { // NB: Not a select, so I can preserve the outer dimension LongTensor indicesSlice = newIndices.narrow(0, d, 1); // Note for the porting guide: THCTensor_(copy) does NOT do normal // broadcasting logic; instead, it will blast the elements from one // to the other so long as the numel is the same indicesSlice.copy_(indices1D); indices1D.div_(self.size(d)); indicesSlice.add_(indices1D, -self.size(d)); } if (TH_INDEX_BASE != 0) { indices1D.add_(1); // "lol" } } //////////////////////////////////////////////////////////// SparseTensor dst = ::at::native::sparse_coo_tensor(newIndices, newValues, self.sizes()); _get_sparse_impl(dst)->set_coalesced(true); THCudaCheck(hipGetLastError()); return dst; #else AT_ERROR("coalesce_sparse_cuda: HIP not supported"); #endif } }} // namespace at::native
c2639656ad192a2f1855a0b5ed366bf08ceaba6e.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/NativeFunctions.h> #include <ATen/native/sparse/SparseUtils.h> #include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <THC/THCThrustAllocator.cuh> #include <THC/THCTensorSort.cuh> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/gather.h> #include <thrust/generate.h> #include <thrust/scan.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/unique.h> #if CUDA_VERSION >= 7000 #include <thrust/system/cuda/execution_policy.h> #endif namespace at { namespace native { SparseTensor coalesce_sparse_cuda(const SparseTensor& self) { #ifndef __HIP_PLATFORM_HCC__ int64_t nnz = self._nnz(); if (nnz < 2) { _get_sparse_impl(self)->set_coalesced(true); } if (self.is_coalesced()) { return self; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); // Replace instances with // For indices, a simple sort + unique suffices // For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection). // TODO: I'm not sure if this could ever be non-contiguous LongTensor values = self._values().contiguous(); int64_t sparseDims = self._sparseDims(); int64_t stride = values.stride(0); // indices will be modified by Thrust, so we have to clone or use new storage // here. LongTensor indices1D = _newFlattenedIndices(self, true); LongTensor origIndices = at::empty({nnz}, self._indices().options()); LongTensor uniqueOffsets = at::empty({nnz}, self._indices().options()); typedef thrust::device_ptr<int64_t> thrust_ptr; thrust_ptr indicesIter(indices1D.data<int64_t>()); thrust_ptr origIndicesIter(origIndices.data<int64_t>()); thrust_ptr uniqueOffsetsIter(uniqueOffsets.data<int64_t>()); // Fill sortedOrigIndices with sequential indices thrust::counting_iterator<int64_t> countIterI(TH_INDEX_BASE); thrust::counting_iterator<int64_t> countIterO(TH_INDEX_BASE); thrust::copy(policy, countIterI, countIterI + nnz, origIndicesIter); thrust::copy(policy, countIterO, countIterO + nnz, uniqueOffsetsIter); thrust::sort_by_key(policy, indicesIter, indicesIter + nnz, origIndicesIter, ThrustLTOp<int64_t>() ); // this forces device-host synchronization! 
thrust::pair<thrust_ptr, thrust_ptr> newEnd = thrust::unique_by_key(policy, indicesIter, indicesIter + nnz, uniqueOffsetsIter ); int64_t newNnz = newEnd.first - indicesIter; indices1D.resize_({1, newNnz}); std::vector<int64_t> newValues_size(values.sizes()); newValues_size[0] = newNnz; Tensor newValues = at::empty(newValues_size, values.options()); dim3 grid(THCCeilDiv(newNnz, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128)); dim3 block(32, 4); AT_DISPATCH_ALL_TYPES_AND_HALF( values.type(), "coalesce_sparse_cuda", [&] { using cuda_accscalar_t = acc_type<scalar_t, /* is_cuda */ true>; apply::coalesceValuesKernel<scalar_t, cuda_accscalar_t><<<grid, block, 0, stream>>>( uniqueOffsets.data<int64_t>(), origIndices.data<int64_t>(), values.data<scalar_t>(), newValues.data<scalar_t>(), nnz, newNnz, stride ); }); // this grid-strided version is slower but probably more flexible // to different sizes // int64_t blockX = min(stride, (int64_t) 512); // dim3 block(blockX, 512 / blockX); // int64_t grid = min((int64_t) 1024, THCCeilDiv((int64_t) newNnz * stride, (int64_t) block.x * block.y)); // THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream>>>( // THCIndexTensor_(data)(state, uniqueOffsets), // THCIndexTensor_(data)(state, origIndices), // THCTensor_(data)(state, values), // THCTensor_(data)(state, newValues), // nnz, // newNnz, // stride // ); //////////////////////////////////////////////////////////// // unflatten indices if necessary LongTensor newIndices; if (sparseDims == 1) { newIndices = indices1D; } else { newIndices = at::empty({sparseDims, newNnz}, origIndices.options()); if (TH_INDEX_BASE != 0) { indices1D.add_(-1); } for (int64_t d = sparseDims - 1; d >= 0; d--) { // NB: Not a select, so I can preserve the outer dimension LongTensor indicesSlice = newIndices.narrow(0, d, 1); // Note for the porting guide: THCTensor_(copy) does NOT do normal // broadcasting logic; instead, it will blast the elements from one // to the other so long as the numel is the same indicesSlice.copy_(indices1D); indices1D.div_(self.size(d)); indicesSlice.add_(indices1D, -self.size(d)); } if (TH_INDEX_BASE != 0) { indices1D.add_(1); // "lol" } } //////////////////////////////////////////////////////////// SparseTensor dst = ::at::native::sparse_coo_tensor(newIndices, newValues, self.sizes()); _get_sparse_impl(dst)->set_coalesced(true); THCudaCheck(cudaGetLastError()); return dst; #else AT_ERROR("coalesce_sparse_cuda: HIP not supported"); #endif } }} // namespace at::native
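coalesce_sparse_cuda above sorts the flattened indices with thrust::sort_by_key and drops duplicates with thrust::unique_by_key before the segmented-reduction kernel runs. The following is a small sketch of that sort-plus-unique idiom on a toy index array (the values reduction is omitted); the index data is invented for illustration.

#include <cstdio>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/unique.h>

int main() {
  // Toy flattened sparse indices with duplicates (stand-in for indices1D above).
  std::vector<long long> h_idx = {3, 1, 3, 0, 1, 2};
  thrust::device_vector<long long> indices(h_idx.begin(), h_idx.end());
  thrust::device_vector<long long> orig(indices.size());
  thrust::sequence(orig.begin(), orig.end());  // 0..nnz-1, like origIndices

  // Sort original positions by index value, then keep one entry per unique index;
  // the surviving positions play the role of uniqueOffsets in the code above.
  thrust::sort_by_key(indices.begin(), indices.end(), orig.begin());
  auto new_end = thrust::unique_by_key(indices.begin(), indices.end(), orig.begin());
  long long new_nnz = new_end.first - indices.begin();

  printf("coalesced nnz: %lld (from %zu)\n", new_nnz, h_idx.size());
  return 0;
}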
02aed3b0f240d7fef138cdea037a6ef732c801a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef WITH_CUDA #include <cmath> #include "core/context_cuda.h" #include "core/tensor.h" #include "utils/cuda_device.h" #include "utils/op_kernel.h" #include "utils/math_functions.h" namespace dragon { namespace kernel { template <typename T> __global__ void _Empty() { } template<> void Empty<float, CUDAContext>() { _Empty<float> << <1, 1 >> >(); CUDA_POST_KERNEL_CHECK; } template<> void Empty<float16, CUDAContext>() { _Empty<float16> << <1, 1 >> >(); CUDA_POST_KERNEL_CHECK; } /******************** activation.dropout ********************/ template<typename T> __global__ void _Dropout(const int count, const uint32_t thresh, const T scale, const T* x, const uint32_t* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] * (mask[idx] > thresh) * scale; } } template<> void Dropout<float, CUDAContext>(const int count, float prob, float scale, const float* x, uint32_t* mask, float* y, CUDAContext* context) { uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob); math::RandomUniform<uint32_t, CUDAContext>(count, float(0), float(UINT_MAX), mask); _Dropout<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, thresh, scale, x, mask, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _DropoutGrad(const int count, const uint32_t thresh, const T scale, const T* dy, const uint32_t* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * (mask[idx] > thresh) * scale; } } template<> void DropoutGrad<float, CUDAContext>(const int count, float prob, float scale, const float* dy, const uint32_t* mask, float* dx) { uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob); _DropoutGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, thresh, scale, dy, mask, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.prelu ********************/ template <typename T> __global__ void _PRelu(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[0]; } } template <typename T> __global__ void _PReluNCHW(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = (idx / dim) % channels; y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[c]; } } template <typename T> __global__ void _PReluNHWC(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % channels; y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[c]; } } template<> void PRelu<float, CUDAContext>(const int count, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* x, const float* w, float* y) { if (channel_shared) { _PRelu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else { if (data_format == "NCHW") { _PReluNCHW<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else if (data_format == "NHWC") { _PReluNHWC<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _PReluGrad(const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[0]); 
} } template <typename T> __global__ void _PReluGradNCHW(const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = (idx / dim) % channels; dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]); } } template <typename T> __global__ void _PReluGradNHWC(const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % channels; dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]); } } template<> void PReluGrad<float, CUDAContext>(const int count, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* dy, const float* x, const float* w, float* dx) { if (channel_shared) { _PReluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else { if (data_format == "NCHW") { _PReluGradNCHW<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else if (data_format == "NHWC") { _PReluGradNHWC<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _PReluWGradBcast(const int count, const int rows, const int row_offset, const T* dy, const T* x, T* bcast_dw) { CUDA_KERNEL_LOOP(idx, count) { bcast_dw[idx] = dy[idx] * x[idx] * (x[idx] <= 0); for (int n = 1; n < rows; n++) { const int cur_idx = idx + n * row_offset; bcast_dw[idx] += dy[cur_idx] * x[cur_idx] * (x[cur_idx] <= 0); } } } template<> void PReluWGrad<float, CUDAContext>(const int rows, const int row_offset, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* dy, const float* x, const float* multiplier, float* bcast_dw, float* dw) { const int cdim = channels * dim; _PReluWGradBcast<float> << < GET_BLOCKS(cdim), CUDA_NUM_THREADS >> >(cdim, rows, row_offset, dy, x, bcast_dw); CUDA_POST_KERNEL_CHECK; if (channel_shared) { float w_sum = math::Dot<float, CUDAContext>(channels * dim, bcast_dw, multiplier); math::AddScalar<float, CUDAContext>(1, w_sum, dw); } else { if (data_format == "NCHW") { math::Gemv<float, CUDAContext>(CblasNoTrans, channels, dim, 1.0, bcast_dw, multiplier, 1.0, dw); } else if (data_format == "NHWC") { math::Gemv<float, CUDAContext>(CblasTrans, dim, channels, 1.0, bcast_dw, multiplier, 1.0, dw); } else LOG(FATAL) << "Unknown data format: " << data_format; } } /******************** activation.elu ********************/ template <typename T> __global__ void _Elu(const int count, const T* x, const float alpha, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? 
x[idx] : alpha * (::exp(x[idx]) - 1); } } template<> void Elu<float, CUDAContext>(const int count, const float* x, const float alpha, float* y) { _Elu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, alpha, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _EluGrad(const int count, const T* dy, const T* y, const float alpha, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((y[idx] > 0) + (alpha + y[idx]) * (y[idx] <= 0)); } } template<> void EluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, const float alpha, float* dx) { _EluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, alpha, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.relu ********************/ template <typename T> __global__ void _Relu(const int count, const T* x, const float slope, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? x[idx] : x[idx] * slope; } } template<> void Relu<float, CUDAContext>(const int count, const float* x, const float slope, float* y) { _Relu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, slope, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _ReluHalf(const int count, const half* x, const float slope, half* y) { const half kSlope = __float2half(slope); const half kZero = __float2half(0.0); CUDA_KERNEL_LOOP(idx, count) { #if __CUDA_ARCH__ >= 530 y[idx] = __hgt(x[idx], kZero) ? x[idx] : __hmul(x[idx], kSlope); #endif } } template<> void Relu<float16, CUDAContext>(const int count, const float16* x, const float slope, float16* y) { _ReluHalf<half> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(x), slope, reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _ReluGrad(const int count, const T* dy, const T* y, const float slope, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((y[idx] > 0) + slope * (y[idx] <= 0)); } } template<> void ReluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, const float slope, float* dx) { _ReluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, slope, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.selu ********************/ template <typename T> __global__ void _SElu(const int count, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? 1.0507 * x[idx] : 1.7581 * (::exp(x[idx]) - 1); } } template<> void SElu<float, CUDAContext>(const int count, const float* x, float* y) { _SElu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SEluGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = y[idx] > 0 ? 
1.0507 * dy[idx] : (1.7581 + y[idx]) * dy[idx]; } } template<> void SEluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _SEluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.sigmoid ********************/ template <typename T> __device__ T _SigmoidUnit(const T x) { return T(1) / (T(1) + exp(-x)); } template <typename T> __global__ void _Sigmoid(const int n, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, n) { y[idx] = _SigmoidUnit<T>(x[idx]); } } template<> void Sigmoid<float, CUDAContext>(const int count, const float* x, float* y) { _Sigmoid<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SigmoidGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * y[idx] * (1 - y[idx]); } } template<> void SigmoidGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _SigmoidGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.softmax ********************/ template <typename T> __global__ void _SoftmaxMaxClass(const int outer_dim, const int classes, const int inner_dim, const T* x, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T max_val = -FLT_MAX; for (int c = 0; c < classes; c++) max_val = max(x[(o_idx * classes + c) * inner_dim + i_idx], max_val); scale[idx] = max_val; } } template <typename T> __global__ void _SoftmaxSubtract(const int count, const int classes, const int inner_dim, const T* scale, T* y) { CUDA_KERNEL_LOOP(idx, count) { int o_idx = idx / inner_dim / classes; int i_idx = idx % inner_dim; y[idx] -= scale[o_idx * inner_dim + i_idx]; } } template <typename T> __global__ void _SoftmaxExp(const int count, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = ::exp(y[idx]); } } template <typename T> __global__ void _SoftmaxSumClass(const int outer_dim, const int classes, const int inner_dim, const T* y, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T sum = 0; for (int c = 0; c < classes; c++) sum += y[(o_idx * classes + c) * inner_dim + i_idx]; scale[idx] = sum; } } template <typename T> __global__ void _SoftmaxDiv(const int count, const int classes, const int inner_dim, const T* scale, T* y) { CUDA_KERNEL_LOOP(idx, count) { int o_idx = idx / inner_dim / classes; int i_idx = idx % inner_dim; y[idx] /= scale[o_idx * inner_dim + i_idx]; } } template<> void Softmax<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* sum_multiplier, const float* x, float* scale, float* y, CUDAContext* context) { const int num_preds = inner_dim * outer_dim; _SoftmaxMaxClass<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, x, scale); _SoftmaxSubtract<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, y); _SoftmaxExp<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, y); _SoftmaxSumClass<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, y, scale); _SoftmaxDiv<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SoftmaxDot(const int outer_dim, 
const int classes, const int inner_dim, const T* dy, const T* y, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T dot = 0; for (int c = 0; c < classes; c++) dot += (y[(o_idx * classes + c) * inner_dim + i_idx] * dy[(o_idx * classes + c) * inner_dim + i_idx]); scale[idx] = dot; } } template<> void SoftmaxGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* sum_multiplier, const float* dy, const float* y, float* scale, float* dx) { const int num_preds = inner_dim * outer_dim; _SoftmaxDot<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, dy, y, scale); _SoftmaxSubtract<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, dx); math::Mul<float, CUDAContext>(count, dx, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.tanh ********************/ template <typename T> __global__ void _Tanh(const int count, const T* x, T* y) { CUDA_KERNEL_LOOP(i, count) { y[i] = std::tanh(x[i]); } } template<> void Tanh<float, CUDAContext>(const int count, const float* x, float* y) { _Tanh<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _TanhGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(i, count) { dx[i] = dy[i] * (1 - y[i] * y[i]); } } template<> void TanhGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _TanhGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** arithmetic.bias_add ********************/ template <typename T> __global__ void _BiasAdd_NCHW(const int count, const int dim, const int inner_dim, const T* bias, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int bias_idx = (idx / inner_dim) % dim; y[idx] += bias[bias_idx]; } } template <typename T> __global__ void _BiasAdd_NHWC(const int count, const int dim, const int inner_dim, const T* bias, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] += bias[idx % dim]; } } template<> void BiasAdd<float, CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const string& data_format, const float* bias, const float* bias_multiplier, float* y) { if (data_format == "NCHW") { _BiasAdd_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, inner_dim, bias, y); } else if (data_format == "NHWC") { _BiasAdd_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, inner_dim, bias, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** arithmetic.clip ********************/ template <typename T> __global__ void _Clip(const int count, const T low, const T high, const T* x, T* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { mask[idx] = 1.0; if (x[idx] > high || x[idx] < low) mask[idx] = 0.0; y[idx] = x[idx] > high ? high : x[idx]; y[idx] = x[idx] < low ? 
low : x[idx]; } } template <> void Clip<float, CUDAContext>(const int count, const float low, const float high, const float* x, float* mask, float* y) { _Clip<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, low, high, x, mask, y); } /******************** arithmetic.scale ********************/ template <typename T> __global__ void _ScaleWithoutBias(const int n, const T* x, const T* scale, const int scale_dim, const int inner_dim, T* y) { CUDA_KERNEL_LOOP(idx, n) { const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = x[idx] * scale[scale_idx]; } } template <typename T> __global__ void _ScaleWithBias(const int n, const T* x, const T* scale, const T* bias, const int scale_dim, const int inner_dim, T* y) { CUDA_KERNEL_LOOP(idx, n) { const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = x[idx] * scale[scale_idx] + bias[scale_idx]; } } template<> void Scale<float, CUDAContext>(const int axis, Tensor* x, Tensor* gamma, Tensor* beta, Tensor* BMul, Tensor* y) { const int count = x->count(); const int inner_dim = x->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* Xdata = x->data<float, CUDAContext>(); auto* Ydata = y->mutable_data<float, CUDAContext>(); auto* Sdata = gamma->data<float, CUDAContext>(); auto* Bdata = beta != nullptr ? beta->data<float, CUDAContext>() : nullptr; if (Bdata != nullptr) _ScaleWithBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, Xdata, Sdata, Bdata, scale_dim, inner_dim, Ydata); else _ScaleWithoutBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, Xdata, Sdata, scale_dim, inner_dim, Ydata); } #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _ScaleWithoutBiasHalf(const int n, const half* x, const half* scale, const int scale_dim, const int inner_dim, half* y) { CUDA_KERNEL_LOOP(idx, n) { #if __CUDA_ARCH__ >= 530 const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = __hmul(x[idx], scale[scale_idx]); #endif } } template <typename T> __global__ void _ScaleWithBiasHalf(const int n, const half* x, const half* scale, const half* bias, const int scale_dim, const int inner_dim, half* y) { CUDA_KERNEL_LOOP(idx, n) { #if __CUDA_ARCH__ >= 530 const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = __hadd(__hmul(x[idx], scale[scale_idx]), bias[scale_idx]); #endif } } template<> void Scale<float16, CUDAContext>(const int axis, Tensor* x, Tensor* gamma, Tensor* beta, Tensor* BMul, Tensor* y) { const int count = x->count(); const int inner_dim = x->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* Xdata = x->data<float16, CUDAContext>(); auto* Ydata = y->mutable_data<float16, CUDAContext>(); auto* Sdata = gamma->data<float16, CUDAContext>(); auto* Bdata = beta != nullptr ? 
beta->data<float16, CUDAContext>() : nullptr; if (Bdata != nullptr) _ScaleWithBiasHalf<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(Xdata), reinterpret_cast<const half*>(Sdata), reinterpret_cast<const half*>(Bdata), scale_dim, inner_dim, reinterpret_cast<half*>(Ydata)); else _ScaleWithoutBiasHalf<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(Xdata), reinterpret_cast<const half*>(Sdata), scale_dim, inner_dim, reinterpret_cast<half*>(Ydata)); } #endif template <> void ScaleGrad<float, CUDAContext>(const int axis, Tensor* dy, Tensor* gamma, Tensor* dx) { const int count = dx->count(); const int inner_dim = dx->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* dYdata = dy->data<float, CUDAContext>(); auto* dXdata = dx->mutable_data<float, CUDAContext>(); auto* Sdata = gamma->data<float, CUDAContext>(); _ScaleWithoutBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dYdata, Sdata, scale_dim, inner_dim, dXdata); } /******************** cast.float2half ********************/ #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _FloatToHalfKernel(const int count, const float* x, half* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = __float2half(x[idx]); } } template <> void Float2Half<float, CUDAContext>(const int count, const float* x, float16* y) { _FloatToHalfKernel<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif /******************** control_flow.compare ********************/ template <typename T> __global__ void _Equal(const int count, const T* a, const T* b, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = fabs(a[idx] - b[idx]) < FLT_EPSILON ? 1.0 : 0.0; } } template <> void Equal<float, CUDAContext>(const int count, const float* a, const float* b, float* y) { _Equal<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, a, b, y); CUDA_POST_KERNEL_CHECK; } /******************** loss.l1_loss ********************/ template <typename T> __global__ void _AbsGrad(const int count, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const T val = dy[idx]; // val > 0: 1 | val == 0: 0 | val < 0: -1 dx[idx] = (val > T(0)) - (val < T(0)); } } template<> void AbsGrad<float, CUDAContext>(const int count, const float* dy, float* dx) { _AbsGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** loss.sigmoid_cross_entropy ********************/ template <typename T> __global__ void _SigmoidCrossEntropy(const int count, const T* x, const T* targets, T* loss) { CUDA_KERNEL_LOOP(idx, count) { loss[idx] = ::log(1 + ::exp(x[idx] - 2 * x[idx] * (x[idx] >= 0))) + x[idx] * ((x[idx] >= 0) - targets[idx]); } } template <> void SigmoidCrossEntropy<float, CUDAContext>(const int count, const float* x, const float* targets, float* loss) { _SigmoidCrossEntropy<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, targets, loss); CUDA_POST_KERNEL_CHECK; } /******************** loss.smooth_l1_loss ********************/ template <typename T> __global__ void _SmoothL1(const int count, const float sigma2, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const T val = x[idx]; const T abs_val = abs(val); if (abs_val < 1.0 / sigma2) y[idx] = 0.5 * val * val * sigma2; else y[idx] = abs_val - 0.5 / sigma2; } } template<> void SmoothL1<float, CUDAContext>(const int count, const float sigma2, const float* x, float* y) { _SmoothL1<float> << <GET_BLOCKS(count), 
CUDA_NUM_THREADS >> >(count, sigma2, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SmoothL1Grad(const int count, const float sigma2, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const T val = dy[idx]; const T abs_val = abs(val); if (abs_val < 1.0 / sigma2) dx[idx] = val * sigma2; // val > 0: 1 | val == 0: 0 | val < 0: -1 else dx[idx] = (val > T(0)) - (val < T(0)); } } template<> void SmoothL1Grad<float, CUDAContext>(const int count, const float sigma2, const float* dy, float* dx) { _SmoothL1Grad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, sigma2, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** loss.softmax_cross_entropy ********************/ template <typename T> __global__ void _SoftmaxCrossEntropy(const int count, const T* prob, const T* target, T* loss) { CUDA_KERNEL_LOOP(idx, count) { loss[idx] = -target[idx] * log(max(prob[idx], FLT_MIN)); } } template <> void SoftmaxCrossEntropy<float, CUDAContext>(const int count, const float* prob, const float* target, float* loss) { _SoftmaxCrossEntropy<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, prob, target, loss); CUDA_POST_KERNEL_CHECK; } /******************** loss.sparse_softmax_cross_entropy ********************/ template <typename T> __global__ void _SparseSoftmaxCrossEntropy(const int count, const T* prob, const T* labels, T* loss, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) { if (label == ignores[k]) { loss[idx] = valid[idx] = 0; break; } } if (k == ignore_num) { loss[idx] = -log(max(prob[(o_idx * classes + label) * inner_dim + i_idx], FLT_MIN)); valid[idx] = 1; } } } template <> void SparseSoftmaxCrossEntropy<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* prob, const float* labels, float* loss, float* valid, Tensor* ignore) { const int* ignores = ignore->count() > 0 ? ignore->data<int, CUDAContext>() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropy<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, prob, labels, loss, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SparseSoftmaxCrossEntropyGrad(const int count, const T* prob, const T* labels, T* dx, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) if (label == ignores[k]) break; if (k != ignore_num) { for (int c = 0; c < classes; c++) dx[(o_idx * classes + c) * inner_dim + i_idx] = 0; valid[idx] = 0; } else { dx[(o_idx * classes + label) * inner_dim + i_idx] -= 1; valid[idx] = 1; } } } template<> void SparseSoftmaxCrossEntropyGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* prob, const float* labels, float* valid, Tensor* ignore, float* dXdata) { const int* ignores = ignore->count() > 0 ? 
ignore->data <int, CUDAContext >() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropyGrad<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, prob, labels, dXdata, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } /******************** loss.sparse_softmax_focal_loss ********************/ template <typename T> __global__ void _SparseSoftmaxFocalScale(const int count, const float gamma, const T* prob, T* scale) { CUDA_KERNEL_LOOP(idx, count) { scale[idx] = ::pow((1.0f - prob[idx]), gamma); } } template <typename T> __global__ void _SparseSoftmaxFocalLoss(const int count, const float pos_alpha, const float neg_alpha, const int neg_id, T* scale, const T* prob, const T* labels, T* loss, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) { if (label == ignores[k]) { loss[idx] = valid[idx] = 0; break; } } if (k == ignore_num) { const int t_ = (o_idx * classes + label) * inner_dim + i_idx; scale[t_] = label > neg_id ? pos_alpha * scale[t_] : neg_alpha * scale[t_]; loss[idx] = -scale[t_] * ::log(max(prob[t_], FLT_MIN)); valid[idx] = label > neg_id ? 1 : 0; } } } template <> void SparseSoftmaxFocalLoss<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const float* prob, const float* labels, float* scale, float* loss, float* valid, Tensor* ignore) { const int* ignores = ignore->count() > 0 ? ignore->data<int, CUDAContext>() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxFocalScale<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, gamma, prob, scale); _SparseSoftmaxFocalLoss<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, pos_alpha, neg_alpha, neg_id, scale, prob, labels, loss, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SparseSoftmaxFocalLossGrad(const int count, const float gamma, const int neg_id, const float eps, const T* scale, const T* prob, const T* labels, T* dx, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) if (label == ignores[k]) break; if (k != ignore_num) { for (int c = 0; c < classes; c++) dx[(o_idx * classes + c) * inner_dim + i_idx] = 0; valid[idx] = 0; } else { const int t_ = (o_idx * classes + label) * inner_dim + i_idx; T grad = -gamma * (scale[t_] / max((1.0f - prob[t_]), eps)) * ::log(max(prob[t_], FLT_MIN)) * prob[t_] + scale[t_]; for (int c = 0; c < classes; c++) { const int i_ = (o_idx * classes + c) * inner_dim + i_idx; if (c == label) { dx[i_] = grad * (prob[t_] - 1); } else { dx[i_] = grad * prob[i_]; } } valid[idx] = label > neg_id ? 
1 : 0; } } } template<> void SparseSoftmaxFocalLossGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float gamma, const int neg_id, const float eps, const float* scale, const float* prob, const float* labels, float* valid, Tensor* ignore, float* dXdata) { const int* ignores = ignore->count() > 0 ? ignore->data <int, CUDAContext >() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxFocalLossGrad<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, gamma, neg_id, eps, scale, prob, labels, dXdata, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } /******************** misc.image_data ********************/ template <typename Tx, typename Ty> __global__ void _ImageData_NCHW(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; Ty raw_value = x[((n * H + h) * W + w) * C + c]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = raw_value; } } template <typename Tx, typename Ty> __global__ void _ImageData_NHWC(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; Ty raw_value = x[idx]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = raw_value; } } template <typename Tx, typename Ty> __global__ void _ImageDataHalf_NCHW(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; float raw_value = x[((n * H + h) * W + w) * C + c]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = __float2half(raw_value); } } template <typename Tx, typename Ty> __global__ void _ImageDataHalf_NHWC(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; float raw_value = x[idx]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = __float2half(raw_value); } } template <> void ImageData<float, float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float* y) { if (data_format == "NCHW") { _ImageData_NCHW<float, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<float, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template <> void ImageData<uint8_t, float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const 
uint8_t* x, float* y) { if (data_format == "NCHW") { _ImageData_NCHW<uint8_t, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<uint8_t, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void ImageData<float, float16, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float16* y) { if (data_format == "NCHW") { _ImageDataHalf_NCHW<float, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else if (data_format == "NHWC") { _ImageDataHalf_NHWC<float, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template <> void ImageData<uint8_t, float16, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const uint8_t* x, float16* y) { if (data_format == "NCHW") { _ImageDataHalf_NCHW<uint8_t, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else if (data_format == "NHWC") { _ImageDataHalf_NHWC<uint8_t, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } #endif /******************** ndarray.argmax ********************/ template <typename T> __global__ void _Arange(const int count, const int start, const int step, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = start + idx * step; } } template<> void Arange<float, CUDAContext>(const int count, const int start, const int step, float* y) { _Arange<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, start, step, y); CUDA_POST_KERNEL_CHECK; } template<> void Arange<int, CUDAContext>(const int count, const int start, const int step, int* y) { _Arange<int> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, start, step, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.argmax ********************/ template <typename T> __global__ void _Argmax(const int count, const int axis_dim, const int inner_dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { T max_val = -FLT_MAX; int max_idx = -1; for (int j = 0; j < axis_dim; ++j) { const T val = x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; if (val > max_val) { max_val = val; max_idx = j; } } y[idx] = max_idx; } } template<> void Argmax<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const int top_k, const float* x, float* y) { CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA"; _Argmax<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.argmin ********************/ template <typename T> __global__ void _Argmin(const int count, const int axis_dim, const int inner_dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { T min_val = FLT_MAX; int min_idx = -1; for (int j = 0; j < 
axis_dim; ++j) { const T val = x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; if (val < min_val) { min_val = val; min_idx = j; } } y[idx] = min_idx; } } template<> void Argmin<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const int top_k, const float* x, float* y) { CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA"; _Argmin<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.at ********************/ template <typename T> __global__ void _CanonicalAxis(const int count, const int dim, T* y) { CUDA_KERNEL_LOOP(idx, count) { if (y[idx] < 0) y[idx] += dim; } } template <> void CanonicalAxis<float, CUDAContext>(const int count, const int dim, float* y) { _CanonicalAxis<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _At(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const T* indices, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } template <> void At<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const float* indices, const float* x, float* y, CUDAContext* context) { _At<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _AtGrad(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const T* indices, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; atomicAdd(dx + x_idx, dy[idx]); } } template <> void AtGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const float* indices, const float* dy, float* dx, CUDAContext* context) { _AtGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.concat ********************/ template <typename T> __global__ void _Concat(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = x_concat_dim * inner_dim; const int outer_idx = idx / tmp; const int concat_idx = idx % tmp; const int y_idx = (outer_idx * y_concat_dim + concat_offset) * inner_dim + concat_idx; y[y_idx] = x[idx]; } } template <> void Concat<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float* x, float* y, CUDAContext* context) { _Concat<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, 
x_concat_dim, y_concat_dim, concat_offset, x, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void Concat<float16, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float16* x, float16* y, CUDAContext* context) { _Concat<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _ConcatGrad(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = x_concat_dim * inner_dim; const int outer_idx = idx / tmp; const int concat_idx = idx % tmp; const int y_idx = (outer_idx * y_concat_dim + concat_offset) * inner_dim + concat_idx; dx[idx] = dy[y_idx]; } } template <> void ConcatGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float* dy, float* dx, CUDAContext* context) { _ConcatGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, dy, dx); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void ConcatGrad<float16, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float16* dy, float16* dx, CUDAContext* context) { _ConcatGrad<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(dx)); CUDA_POST_KERNEL_CHECK; } #endif /******************** ndarray.crop ********************/ template<typename T> __global__ void _Crop1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; y[idx] = x[(o * dim + ex_d + start) * inner_dim + i]; } } template<> void Crop1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const float* x, float* y, CUDAContext* context) { _Crop1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, start, x, y); CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _Crop1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int d = (idx / inner_dim) % dim; const int o = idx / inner_dim / dim; if (d >= start && d < end) dx[idx] = dy[(o * ex_dim + d - start) * inner_dim + i]; } } template<> void Crop1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const float* dy, float* dx, CUDAContext* context) { _Crop1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, start, end, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.pad ********************/ template <typename T> __global__ void _ConstPad1D(const int count, const int dim, const int ex_dim, const int 
inner_dim, const int pad_l, const T value, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = ex_d - pad_l; y[idx] = (d < 0 || d >= dim) ? value : x[(o * dim + d) * inner_dim + i]; } } template <> void ConstPad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float value, const float* x, float* y, CUDAContext* context) { _ConstPad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, value, x, y); } template <typename T> __global__ void _ReflectPad1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; int d = ex_d - pad_l; d = max(d, -d); d = min(d, 2 * dim - d - 2); y[idx] = x[(o * dim + d) * inner_dim + i]; } } template <> void ReflectPad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* x, float* y, CUDAContext* context) { _ReflectPad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, x, y); } template <typename T> __global__ void _EdgePad1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = min(dim - 1, max(ex_d - pad_l, 0)); y[idx] = x[(o * dim + d) * inner_dim + i]; } } template <> void EdgePad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* x, float* y, CUDAContext* context) { _EdgePad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, x, y); } template <typename T> __global__ void _ConstPad1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % dim + pad_l; const int o = idx / inner_dim / dim; dx[idx] = dy[(o * ex_dim + ex_d) * inner_dim + i]; } } template <> void ConstPad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, CUDAContext* context) { _ConstPad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } template <typename T> __global__ void _ReflectPad1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; int d = ex_d - pad_l; d = max(d, -d); d = min(d, 2 * dim - d - 2); atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]); } } template <> void ReflectPad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx) { _ReflectPad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } template <typename T> __global__ void _EdgePad1DGrad(const int count, const int dim, const 
int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = min(dim - 1, max(ex_d - pad_l, 0)); atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]); } } template <> void EdgePad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, CUDAContext* context) { _EdgePad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } /******************** ndarray.one_hot ********************/ template <typename T> __global__ void _OneHot(const int count, const int depth, const int on_value, const float* x, float* y) { CUDA_KERNEL_LOOP(idx, count) { const int val = x[idx]; y[idx * depth + val] = on_value; } } template <> void OneHot<float, CUDAContext>(const int count, const int depth, const int on_value, const float* x, float* y) { _OneHot<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, depth, on_value, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.reduce ********************/ template <typename T> __global__ void _Sum(const int count, const int axis_dim, const int inner_dim, const T* x, float* y) { CUDA_KERNEL_LOOP(idx, count) { T sum_val = 0.0; for (int j = 0; j < axis_dim; j++) sum_val += x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; y[idx] = sum_val; } } template<> void Sum<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const float* x, float* y) { _Sum<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SumGrad(const int count, const int axis_dim, const int inner_dim, const T coeff, const T* dy, float* dx) { CUDA_KERNEL_LOOP(idx, count) { for (int j = 0; j < axis_dim; j++) dx[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim] = dy[idx] * coeff; } } template<> void SumGrad<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const float coeff, const float* dy, float* dx) { _SumGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, coeff, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.repeat ********************/ template <typename T> __global__ void _Repeat(const int count, const int inner_dim, const int repeats, const int dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % inner_dim; const int b = (idx / inner_dim / repeats) % dim; const int n = idx / inner_dim / repeats / dim; const int x_idx = (n * dim + b) * inner_dim + d; y[idx] = x[x_idx]; } } template <> void Repeat<float, CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const int repeats, const float* x, float* y, CUDAContext* context) { _Repeat<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, inner_dim, repeats, dim, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _RepeatGrad(const int count, const int inner_dim, const int repeats, const int dim, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % inner_dim; const int b = (idx / inner_dim) % dim; const int n = idx / inner_dim / dim; T gradient = 0; for (int t = 0; t < repeats; t++) gradient += dy[(((n * dim + b) * repeats) + t) * inner_dim + d]; dx[idx] = gradient; } } template <> void RepeatGrad<float, 
CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const int repeats, const float* dy, float* dx, CUDAContext* context) { _RepeatGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, inner_dim, repeats, dim, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.slice ********************/ template <typename T> __global__ void _Slice(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = y_slice_dim * inner_dim; const int outer_idx = idx / tmp; const int slice_idx = idx % tmp; const int x_idx = (outer_idx * x_slice_dim + slice_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } template <> void Slice<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const float* x, float* y, CUDAContext* context) { _Slice<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, slice_offset, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SliceGrad(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = y_slice_dim * inner_dim; const int outer_idx = idx / tmp; const int slice_idx = idx % tmp; const int x_idx = (outer_idx * x_slice_dim + slice_offset) * inner_dim + slice_idx; dx[x_idx] = dy[idx]; } } template <> void SliceGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const float* dy, float* dx, CUDAContext* context) { _SliceGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, slice_offset, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.tile ********************/ template <typename T> __global__ void _Tile(const int count, const int ex_inner_dim, const int multiple, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % ex_inner_dim; const int n = idx / ex_inner_dim / multiple; const int x_idx = n * ex_inner_dim + d; y[idx] = x[x_idx]; } } template <> void Tile<float, CUDAContext>(const int count, const int outer_dim, const int ex_inner_dim, const int multiple, const float* x, float* y, CUDAContext* context) { _Tile<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ex_inner_dim, multiple, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _TileGrad(const int count, const int ex_inner_dim, const int multiple, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % ex_inner_dim; const int n = idx / ex_inner_dim; T gradient = 0; for (int t = 0; t < multiple; t++) gradient += dy[(n * multiple + t) * ex_inner_dim + d]; dx[idx] = gradient; } } template <> void TileGrad<float, CUDAContext>(const int count, const int outer_dim, const int ex_inner_dim, const int multiple, const float* dy, float* dx, CUDAContext* context) { _TileGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ex_inner_dim, multiple, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.transpose ********************/ template <typename T> __global__ void _Transpose(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const T* x, T* y) 
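// Note on the _Transpose index mapping: for each output element `idx`, the loop
// walks the output strides (`new_steps`) in the permuted `order` and accumulates
// the matching input offset from the input strides (`old_steps`).
// Worked example (illustrative values only, not taken from the framework):
// a 2x3 tensor transposed with order = {1, 0}, old_steps = {3, 1},
// new_steps = {2, 1}; output idx = 3 gives 3 / 2 = 1 on the first output axis
// (input axis 1, stride 1) and 1 / 1 = 1 on the second output axis (input axis 0,
// stride 3), so x_idx = 1 * 1 + 1 * 3 = 4, i.e. the input element at
// coordinate (1, 1), as expected for a transpose.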
{ CUDA_KERNEL_LOOP(idx, count) { int x_idx = 0, y_idx = idx; for (int j = 0; j < ndim; ++j) { int k = order[j]; x_idx += (y_idx / new_steps[j]) * old_steps[k]; y_idx %= new_steps[j]; } y[idx] = x[x_idx]; } } template <> void Transpose<float, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float* x, float* y) { _Transpose<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, x, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void Transpose<float16, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float16* x, float16* y) { _Transpose<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _TransposeGrad(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { int x_idx = 0, y_idx = idx; for (int j = 0; j < ndim; ++j) { int k = order[j]; x_idx += (y_idx / new_steps[j]) * old_steps[k]; y_idx %= new_steps[j]; } dx[x_idx] = dy[idx]; } } template <> void TransposeGrad<float, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float* dy, float* dx) { _TransposeGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, dy, dx); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void TransposeGrad<float16, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float16* dy, float16* dx) { _TransposeGrad<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(dx)); CUDA_POST_KERNEL_CHECK; } #endif /******************** recurrent.lstm_uint ********************/ template <typename T> __global__ void _LSTMUnitAct(const int count, const int channels, const int g_offset, const int x_offset, const T* x, T* x_act) { CUDA_KERNEL_LOOP(idx, count) { const int ch_4 = idx % x_offset; if (ch_4 < g_offset) x_act[idx] = _SigmoidUnit<float>(x[idx]); else x_act[idx] = std::tanh(x[idx]); } } template <typename T> __global__ void _LSTMUnit(const int count, const int channels, const int o_offset, const int g_offset, const int x_offset, const T* c_1, T* x_act, const T* cont, T* c, T* h) { CUDA_KERNEL_LOOP(idx, count) { const int n = idx / channels; const int ch = idx % channels; T* x_act_ = x_act + n * x_offset; const T i = x_act_[ch]; if (cont != nullptr && cont[n] != T(1)) x_act_[channels + ch] *= cont[n]; const T f = x_act_[channels + ch]; const T o = x_act_[o_offset + ch]; const T g = x_act_[g_offset + ch]; const T c_ = c[idx] = f * c_1[idx] + i * g; h[idx] = o * std::tanh(c_); } } template <> void LSTMUnit<float, CUDAContext>(const int count, const int num, const int channels, const float* c_1, const float* x, const float* cont, float* x_act, float* c, float* h) { const int o_offset = 2 * channels, g_offset = 3 * channels; const int x_offset = 4 * channels, y_count = count / 4; _LSTMUnitAct<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, g_offset, x_offset, x, x_act); _LSTMUnit<float> << <GET_BLOCKS(y_count), CUDA_NUM_THREADS >> >(y_count, channels, o_offset, g_offset, x_offset, c_1, x_act, 
cont, c, h); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _LSTMUnitGrad(const int count, const int channels, const int o_offset, const int g_offset, const int x_offset, const T* c_1, const T* x_act, const T* c, const T* dc, const T* dh, T* dc_1, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int n = idx / channels; const int ch = idx % channels; const T* x_act_ = x_act + n * x_offset; T* dx_ = dx + n * x_offset; const T i = x_act_[ch]; const T f = x_act_[channels + ch]; const T o = x_act_[o_offset + ch]; const T g = x_act_[g_offset + ch]; T* p_di = dx_ + ch; T* p_df = dx_ + channels + ch; T* p_do = dx_ + o_offset + ch; T* p_dg = dx_ + g_offset + ch; const T tanh_c_t = tanh(c[idx]); const T dc_1_sum_term = dh[idx] * o * (1 - tanh_c_t * tanh_c_t) + dc[idx]; dc_1[idx] = dc_1_sum_term * f; *p_di = dc_1_sum_term * g; *p_df = dc_1_sum_term * c_1[idx]; *p_do = dh[idx] * tanh_c_t; *p_dg = dc_1_sum_term * i; } } template <typename T> __global__ void _LSTMUnitGradAct(const int count, const int channels, const int g_offset, const int x_offset, const T* x_act, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int ch_4 = idx % x_offset; const T x_act_ = x_act[idx]; if (ch_4 < g_offset) dx[idx] = dx[idx] * x_act_ * (T(1) - x_act_); else dx[idx] = dx[idx] * (T(1) - x_act_ * x_act_); } } template <> void LSTMUnitGrad<float, CUDAContext>(const int count, const int num, const int channels, const float* c_1, const float* x_act, const float* c, const float* dc, const float* dh, float* dc_1, float* dx) { const int o_offset = 2 * channels, g_offset = 3 * channels; const int x_offset = 4 * channels, y_count = count / 4; _LSTMUnitGrad<float> << <GET_BLOCKS(y_count), CUDA_NUM_THREADS >> >(y_count, channels, o_offset, g_offset, x_offset, c_1, x_act, c, dc, dh, dc_1, dx); _LSTMUnitGradAct<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, g_offset, x_offset, x_act, dx); CUDA_POST_KERNEL_CHECK; } /******************** update.adam_update ********************/ template <typename T> __global__ void _AdamUpdate(const int n, T* g, T* m, T* v, const T beta1, const T beta2, const T eps, const T lr) { CUDA_KERNEL_LOOP(i, n) { T gi = g[i]; T mi = m[i] = m[i] * beta1 + gi * (1 - beta1); T vi = v[i] = v[i] * beta2 + gi * gi * (1 - beta2); g[i] = lr * mi / (sqrt(vi) + eps); } } template <> void AdamUpdate<float, CUDAContext>(Tensor* x, Tensor* m, Tensor* v, Tensor* t, const float beta1, const float beta2, const float eps, const float lr) { TIndex count = x->count(); auto* Xdata = x->mutable_data<float, CUDAContext>(); auto* Mdata = m->mutable_data<float, CUDAContext>(); auto* Vdata = v->mutable_data<float, CUDAContext>(); _AdamUpdate<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, Xdata, Mdata, Vdata, beta1, beta2, eps, lr); CUDA_POST_KERNEL_CHECK; } /******************** update.nesterov_update ********************/ template <typename T> __global__ void _NesterovUpdate(const int n, T* g, T* h, const T momentum, const T lr) { CUDA_KERNEL_LOOP(i, n) { T hi = h[i]; T hi_new = h[i] = momentum * hi + lr * g[i]; g[i] = (1 + momentum) * hi_new - momentum * hi; } } template <> void NesterovUpdate<float, CUDAContext>(const int count, float* x, float* h, Tensor* t, const float momentum, const float lr, CUDAContext* ctx) { _NesterovUpdate<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, h, momentum, lr); CUDA_POST_KERNEL_CHECK; } /******************** update.rmsprop_update ********************/ template <typename T> __global__ void _RMSPropUpdate(const int n, T* g, T* h, const T 
decay, const T eps, const T lr) {
    CUDA_KERNEL_LOOP(i, n) {
        T gi = g[i];
        T hi = h[i] = decay * h[i] + (1 - decay) * gi * gi;
        g[i] = lr * g[i] / (sqrt(hi) + eps);
    }
}

template <> void RMSPropUpdate<float, CUDAContext>(const int count, float* x, float* h, Tensor* t,
                                                   const float decay, const float eps, const float lr) {
    _RMSPropUpdate<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, h, decay, eps, lr);
    CUDA_POST_KERNEL_CHECK;
}

/******************** vision.bilinear_resize ********************/

template <typename T>
__global__ void _BilinearResize_NCHW(const int count, const int N, const int C, const int H, const int W,
                                     const int out_h, const int out_w,
                                     const float scale_h, const float scale_w,
                                     const T* x, T* y) {
    CUDA_KERNEL_LOOP(idx, count) {
        const int w = idx % out_w;
        const int h = (idx / out_w) % out_h;
        const int c = (idx / out_w / out_h) % C;
        const int n = idx / out_w / out_h / C;
        const float h_in = h * scale_h;
        const int top_y_idx = floorf(h_in);
        // clamp the bottom / right neighbors at the image border
        const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1;
        const float y_lerp = h_in - top_y_idx;
        const float w_in = w * scale_w;
        const int left_x_idx = floorf(w_in);
        const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1;
        const float x_lerp = w_in - left_x_idx;
        const int NCHT = (n * C + c) * H + top_y_idx;
        const int NCHB = (n * C + c) * H + bottom_y_idx;
        const float top_left(x[NCHT * W + left_x_idx]);
        const float top_right(x[NCHT * W + right_x_idx]);
        const float bottom_left(x[NCHB * W + left_x_idx]);
        const float bottom_right(x[NCHB * W + right_x_idx]);
        const float top = top_left + (top_right - top_left) * x_lerp;
        const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
        y[idx] = top + (bottom - top) * y_lerp;
    }
}

template <typename T>
__global__ void _BilinearResize_NHWC(const int count, const int N, const int C, const int H, const int W,
                                     const int out_h, const int out_w,
                                     const float scale_h, const float scale_w,
                                     const T* x, T* y) {
    CUDA_KERNEL_LOOP(idx, count) {
        const int c = idx % C;
        const int w = (idx / C) % out_w;
        const int h = (idx / C / out_w) % out_h;
        const int n = idx / C / out_w / out_h;
        const float h_in = h * scale_h;
        const int top_y_idx = floorf(h_in);
        const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1;
        const float y_lerp = h_in - top_y_idx;
        const float w_in = w * scale_w;
        const int left_x_idx = floorf(w_in);
        const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1;
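        // Note on the sampling below: this is a separable bilinear interpolation.
        // With x_lerp = w_in - left_x_idx and y_lerp = h_in - top_y_idx, the kernel
        // first blends the two columns,
        //     top    = top_left    + (top_right    - top_left)    * x_lerp
        //     bottom = bottom_left + (bottom_right - bottom_left) * x_lerp
        // and then blends the two rows, y = top + (bottom - top) * y_lerp.
        // Worked example (illustrative numbers only): neighbors 1, 2, 3, 4 with
        // x_lerp = y_lerp = 0.5 give top = 1.5, bottom = 3.5, output = 2.5.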
        const float x_lerp = w_in - left_x_idx;
        const int NHT = n * H + top_y_idx;
        const int NHB = n * H + bottom_y_idx;
        const float top_left(x[(NHT * W + left_x_idx) * C + c]);
        const float top_right(x[(NHT * W + right_x_idx) * C + c]);
        const float bottom_left(x[(NHB * W + left_x_idx) * C + c]);
        const float bottom_right(x[(NHB * W + right_x_idx) * C + c]);
        const float top = top_left + (top_right - top_left) * x_lerp;
        const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
        y[idx] = top + (bottom - top) * y_lerp;
    }
}

template <> void BilinearResize<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W,
                                                    const int out_h, const int out_w,
                                                    const string& data_format,
                                                    const float* x, float* y) {
    const float scale_h = (float)H / out_h;
    const float scale_w = (float)W / out_w;
    if (data_format == "NCHW") {
        _BilinearResize_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W,
            out_h, out_w, scale_h, scale_w, x, y);
    } else if (data_format == "NHWC") {
        _BilinearResize_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W,
            out_h, out_w, scale_h, scale_w, x, y);
    } else LOG(FATAL) << "Unknown data format: " << data_format;
    CUDA_POST_KERNEL_CHECK;
}

template <typename T>
__global__ void _BilinearResizeGrad_NCHW(const int count, const int N, const int C, const int H, const int W,
                                         const int out_h, const int out_w,
                                         const float scale_h, const float scale_w,
                                         const T* dy, T* dx) {
    CUDA_KERNEL_LOOP(idx, count) {
        const int w = idx % out_w;
        const int h = (idx / out_w) % out_h;
        const int c = (idx / out_w / out_h) % C;
        const int n = idx / out_w / out_h / C;
        const float h_in = h * scale_h;
        const int top_y_idx = floorf(h_in);
        const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1;
        const float y_lerp = h_in - top_y_idx;
        const float w_in = w * scale_w;
        const int left_x_idx = floorf(w_in);
        const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1;
        const float x_lerp = w_in - left_x_idx;
        const int NCHT = (n * C + c) * H + top_y_idx;
        const int NCHB = (n * C + c) * H + bottom_y_idx;
        // scatter the gradient to the four input neighbors with the same lerp weights
        const float dtop = (1 - y_lerp) * dy[idx];
        const float dbottom = y_lerp * dy[idx];
        atomicAdd(&dx[NCHT * W + left_x_idx], static_cast<T>((1 - x_lerp) * dtop));
        atomicAdd(&dx[NCHT * W + right_x_idx], static_cast<T>(x_lerp * dtop));
        atomicAdd(&dx[NCHB * W + left_x_idx], static_cast<T>((1 - x_lerp) * dbottom));
        atomicAdd(&dx[NCHB * W + right_x_idx], static_cast<T>(x_lerp * dbottom));
    }
}

template <typename T>
__global__ void _BilinearResizeGrad_NHWC(const int count, const int N, const int C, const int H, const int W,
                                         const int out_h, const int out_w,
                                         const float scale_h, const float scale_w,
                                         const T* dy, T* dx) {
    CUDA_KERNEL_LOOP(idx, count) {
        const int c = idx % C;
        const int w = (idx / C) % out_w;
        const int h = (idx / C / out_w) % out_h;
        const int n = idx / C / out_w / out_h;
        const float h_in = h * scale_h;
        const int top_y_idx = floorf(h_in);
        const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1;
        const float y_lerp = h_in - top_y_idx;
        const float w_in = w * scale_w;
        const int left_x_idx = floorf(w_in);
        const int right_x_idx = (w_in < W - 1) ?
ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NHT = n * H + top_y_idx; const int NHB = n * H + bottom_y_idx; const float dtop = (1 - y_lerp) * dy[idx]; const float dbottom = y_lerp * dy[idx]; atomicAdd(&dx[(NHT * W + left_x_idx) * C + c], static_cast<T>((1 - x_lerp) * dtop)); atomicAdd(&dx[(NHT * W + right_x_idx) * C + c], static_cast<T>(x_lerp * dtop)); atomicAdd(&dx[(NHB * W + left_x_idx) * C + c], static_cast<T>((1 - x_lerp) * dbottom)); atomicAdd(&dx[(NHB * W + right_x_idx) * C + c], static_cast<T>(x_lerp * dbottom)); } } template <> void BilinearResizeGrad<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* dy, float* dx) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; math::Set<float, CUDAContext>(N * C * H * W, 0, dx); if (data_format == "NCHW") { _BilinearResizeGrad_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else if(data_format == "NHWC") { _BilinearResizeGrad_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } /******************** vision.conv ********************/ template<typename T> __global__ void _Im2Col2d_NCHW(const int count, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* im, T* col) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % col_w; const int h_idx = idx / col_w; const int h = h_idx % col_h; const int im_c = h_idx / col_h; const int c = im_c * kernel_h * kernel_w; const int im_h_off = h * stride_h - pad_h; const int im_w_off = w * stride_w - pad_w; T* col_ptr = col; col_ptr += ((c * col_h + h) * col_w + w); const T* im_ptr = im; im_ptr += ((im_c * H + im_h_off) * W + im_w_off); for (int kh = 0; kh < kernel_h; kh++) { for (int kw = 0; kw < kernel_w; kw++) { const int im_h = kh * dilation_h + im_h_off; const int im_w = kw * dilation_w + im_w_off; *col_ptr = (im_h >= 0 && im_w >= 0 && im_h < H && im_w < W) ? im_ptr[kh * dilation_h * W + kw * dilation_w] : 0; col_ptr += (col_h * col_w); } } } } template<typename T> __global__ void _Im2Col2d_NHWC(const int count, const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* im, T* col) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % col_w; const int h = idx / C / col_w; const int im_h_off = h * stride_h - pad_h; const int im_w_off = w * stride_w - pad_w; const int base_col_idx = (h * col_w) + w; for (int kh = 0; kh < kernel_h; kh++) { for (int kw = 0; kw < kernel_w; kw++) { const int im_h = kh * dilation_h + im_h_off; const int im_w = kw * dilation_w + im_w_off; const int col_idx = (((base_col_idx * kernel_h + kh) * kernel_w + kw) * C + c); col[col_idx] = (im_h >= 0 && im_w >= 0 && im_h < H && im_w < W) ? 
im[(im_h * W + im_w) * C + c] : 0; } } } } template <> void Im2Col2d<float, CUDAContext>(const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const string& data_format, const float* im, float* col) { if (data_format == "NCHW") { const int count = (C * col_h * col_w); _Im2Col2d_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, im, col); } else if (data_format == "NHWC") { const int count = (col_h * col_w * C); _Im2Col2d_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, C, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, im, col); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _Col2Im2d_NCHW(const int count, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* col, T* im) { CUDA_KERNEL_LOOP(idx, count) { T val = 0; const int im_w = idx % W + pad_w; const int im_h = (idx / W) % H + pad_h; const int im_c = idx / W / H; const int ex_kernel_h = (kernel_h - 1) * dilation_h + 1; const int ex_kernel_w = (kernel_w - 1) * dilation_w + 1; // redundant pixels will be ignored when conv // note to clip them by min(x,col_w) const int w_start = (im_w < ex_kernel_w) ? 0 : (im_w - ex_kernel_w) / stride_w + 1; const int w_end = min(im_w / stride_w + 1, col_w); const int h_start = (im_h < ex_kernel_h) ? 0 : (im_h - ex_kernel_h) / stride_h + 1; const int h_end = min(im_h / stride_h + 1, col_h); for (int h = h_start; h < h_end; ++h) { for (int w = w_start; w < w_end; ++w) { int kh_off = (im_h - h * stride_h); int kw_off = (im_w - w * stride_w); // only the serval im pixels used in dilated-conv // ignore the corresponding col pixels if (kh_off % dilation_h == 0 && kw_off % dilation_w == 0) { kh_off /= dilation_h; kw_off /= dilation_w; const int col_idx = (((im_c * kernel_h + kh_off) * kernel_w + kw_off) * col_h + h) * col_w + w; val += col[col_idx]; } } } im[idx] = val; } } template<typename T> __global__ void _Col2Im2d_NHWC(const int count, const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* col, T* im) { CUDA_KERNEL_LOOP(idx, count) { T val = 0; const int im_c = idx % C; const int im_w = (idx / C) % W + pad_w; const int im_h = (idx / C / W) + pad_h; const int ex_kernel_h = (kernel_h - 1) * dilation_h + 1; const int ex_kernel_w = (kernel_w - 1) * dilation_w + 1; // redundant pixels will be ignored when conv // note to clip them by min(x,col_w) const int w_start = (im_w < ex_kernel_w) ? 0 : (im_w - ex_kernel_w) / stride_w + 1; const int w_end = min(im_w / stride_w + 1, col_w); const int h_start = (im_h < ex_kernel_h) ? 
0 : (im_h - ex_kernel_h) / stride_h + 1; const int h_end = min(im_h / stride_h + 1, col_h); for (int h = h_start; h < h_end; ++h) { for (int w = w_start; w < w_end; ++w) { int kh_off = (im_h - h * stride_h); int kw_off = (im_w - w * stride_w); // only the serval im pixels used in dilated-conv // ignore the corresponding col pixels if (kh_off % dilation_h == 0 && kw_off % dilation_w == 0) { kh_off /= dilation_h; kw_off /= dilation_w; const int col_idx = (((h * col_w + w) * kernel_h + kh_off) * kernel_w + kw_off) * C + im_c; val += col[col_idx]; } } } im[idx] = val; } } template <> void Col2Im2d<float, CUDAContext>(const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const string& data_format, const float* col, float* im) { if (data_format == "NCHW") { const int count = (C * H * W); _Col2Im2d_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, col, im); } else if (data_format == "NHWC") { const int count = (H * W * C); _Col2Im2d_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, C, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, col, im); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } /******************** vision.nn_resize ********************/ template <typename T> __global__ void _NNResize_NCHW(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); y[idx] = x[((n * C + c) * H + h_in) * W + w_in]; } } template <typename T> __global__ void _NNResize_NHWC(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); y[idx] = x[((n * H + h_in) * W + w_in) * C + c]; } } template <> void NNResize<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* x, float* y) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; if (data_format == "NCHW") { _NNResize_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else if(data_format == "NHWC") { _NNResize_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _NNResizeGrad_NCHW(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* 
dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); atomicAdd(&dx[((n * C + c) * H + h_in) * W + w_in], dy[idx]); } } template <typename T> __global__ void _NNResizeGrad_NHWC(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); atomicAdd(&dx[((n * H + h_in) * W + w_in) * C + c], dy[idx]); } } template <> void NNResizeGrad<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* dy, float* dx) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; math::Set<float, CUDAContext>(N * C * H * W, 0, dx); if (data_format == "NCHW") { _NNResizeGrad_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else if(data_format == "NHWC") { _NNResizeGrad_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } /******************** vision.pooling ********************/ template<typename T> __global__ void _MAXPooling2d_NCHW(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int pw = idx % pool_w; const int ph = (idx / pool_w) % pool_h; const int pc = (idx / pool_w / pool_h) % C; const int pn = idx / pool_w / pool_h / C; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; const int end_h = min(start_h + kernel_h, H); const int end_w = min(start_w + kernel_w, W); start_h = max(start_h, 0); start_w = max(start_w, 0); T max_val = -FLT_MAX; int max_idx = -1; const T* x_ptr = x + (pn * C + pc) * H * W; for (int h = start_h; h < end_h; ++h) { for (int w = start_w; w < end_w; ++w) { if (x_ptr[h * W + w] > max_val) { max_idx = h * W + w; max_val = x_ptr[max_idx]; } } } y[idx] = max_val; mask[idx] = max_idx; } } template<typename T> __global__ void _MAXPooling2d_NHWC(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int pc = idx % C; const int pw = (idx / C) % pool_w; const int ph = (idx / C / pool_w) % pool_h; const int pn = idx / C / pool_w / pool_h; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; const int end_h = min(start_h + kernel_h, H); const int end_w = min(start_w + kernel_w, W); start_h = max(start_h, 0); start_w = max(start_w, 0); T max_val = -FLT_MAX; int max_idx = -1; for (int h = start_h; h < end_h; ++h) { for (int w = 
start_w; w < end_w; ++w) { const int x_idx = ((pn * H + h) * W + w) * C + pc; if (x[x_idx] > max_val) { max_idx = x_idx; max_val = x[max_idx]; } } } y[idx] = max_val; mask[idx] = max_idx; } } template<> void MAXPooling2d<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* x, int* mask, float* y) { if (data_format == "NCHW") { _MAXPooling2d_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, mask, y); } else if (data_format == "NHWC") { _MAXPooling2d_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, mask, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _AVGPooling2d_NCHW(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int pw = idx % pool_w; const int ph = (idx / pool_w) % pool_h; const int pc = (idx / pool_w / pool_h) % C; const int pn = idx / pool_w / pool_h / C; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); start_h = max(start_h, 0); start_w = max(start_w, 0); end_h = min(end_h, H); end_w = min(end_w, W); const T* x_ptr = x + (pn * C + pc) * H * W; const int pool_area = (end_h - start_h) * (end_w - start_w); T avg_val = 0; for (int h = start_h; h < end_h; ++h) { for (int w = start_w; w < end_w; ++w) { avg_val += x_ptr[h * W + w]; } } y[idx] = avg_val / pool_area; } } template<typename T> __global__ void _AVGPooling2d_NHWC(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int pc = idx % C; const int pw = (idx / C) % pool_w; const int ph = (idx / C / pool_w) % pool_h; const int pn = idx / C / pool_w / pool_h; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); start_h = max(start_h, 0); start_w = max(start_w, 0); end_h = min(end_h, H); end_w = min(end_w, W); const int pool_area = (end_h - start_h) * (end_w - start_w); T avg_val = 0; for (int h = start_h; h < end_h; ++h) for (int w = start_w; w < end_w; ++w) avg_val += x[((pn * H + h) * W + w) * C + pc]; y[idx] = avg_val / pool_area; } } template<> void AVGPooling2d<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* x, float* y) { if (data_format == "NCHW") { _AVGPooling2d_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, y); } else if 
(data_format == "NHWC") { _AVGPooling2d_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _MAXPooling2dGrad_NCHW(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; // allow overlapping const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; // allow clip const int end_ph = min((h + pad_h) / stride_h + 1, pool_h); const int end_pw = min((w + pad_w) / stride_w + 1, pool_w); T grad = 0; const int offset = (n * C + c) * pool_h * pool_w; const T* dy_ptr = dy + offset; const int* mask_ptr = mask + offset; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { if (mask_ptr[ph * pool_w + pw] == (h * W + w)) { grad += dy_ptr[ph * pool_w + pw]; } } } dx[idx] = grad; } } template<typename T> __global__ void _MAXPooling2dGrad_NHWC(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % W; const int h = (idx / C / W) % H; const int n = idx / C / W / H; // allow overlapping const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; // allow clip const int end_ph = min((h + pad_h) / stride_h + 1, pool_h); const int end_pw = min((w + pad_w) / stride_w + 1, pool_w); T grad = 0; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { const int x_idx = ((n * H + h) * W + w) * C + c; const int y_idx = ((n * pool_h + ph) * pool_w + pw) * C + c; if (mask[y_idx] == x_idx) grad += dy[y_idx]; } } dx[idx] = grad; } } template<> void MAXPooling2dGrad<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* dy, const int* mask, float* dx) { if (data_format == "NCHW") { _MAXPooling2dGrad_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, mask, dx); } else if (data_format == "NHWC") { _MAXPooling2dGrad_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, mask, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _AVGPooling2dGrad_NCHW(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int end_ph = min(h / stride_h + 1, pool_h); const int end_pw = min(w / stride_w + 1, pool_w); T grad = 0; const T* dy_ptr = dy + (n * C + c) * pool_h * pool_w; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); int pool_area = (end_h - start_h) * (end_w - start_w); grad += (dy_ptr[ph * pool_w + pw] / pool_area); } } dx[idx] = grad; } } template<typename T> __global__ void _AVGPooling2dGrad_NHWC(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % W; const int h = (idx / C / W) % H; const int n = idx / C / W / H; const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int end_ph = min(h / stride_h + 1, pool_h); const int end_pw = min(w / stride_w + 1, pool_w); T grad = 0; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); int pool_area = (end_h - start_h) * (end_w - start_w); const int y_idx = ((n * pool_h + ph) * pool_w + pw) * C + c; grad += (dy[y_idx] / pool_area); } } dx[idx] = grad; } } template<> void AVGPooling2dGrad<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* dy, float* dx) { if (data_format == "NCHW") { _AVGPooling2dGrad_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, dx); } else if (data_format == "NHWC") { _AVGPooling2dGrad_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } /******************** vision.roi_pooling ********************/ template <typename T> __global__ void _ROIPooling(const int count, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const T* x, const T* roi, int* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { int pw = idx % pool_w; int ph = (idx / pool_w) % pool_h; int c = (idx / pool_w / pool_h) % channels; int n = idx / pool_w / pool_h / channels; roi += n * 5; int im_idx = roi[0]; int x1 = round(roi[1] * spatial_scale); int y1 = round(roi[2] * spatial_scale); int x2 = round(roi[3] * spatial_scale); int y2 = round(roi[4] * spatial_scale); int roi_height = max(y2 - y1 + 1, 1); int roi_width = max(x2 - x1 + 1, 1); const float bin_size_h = (float)roi_height / (float)pool_h; const float bin_size_w = (float)roi_width / (float)pool_w; int start_h = floor(bin_size_h * ph); int start_w = floor(bin_size_w * pw); int end_h = ceil(bin_size_h * (ph + 1)); int end_w = ceil(bin_size_w * (pw + 1)); start_h = min(max(start_h + y1, 0), height); start_w = min(max(start_w + x1, 0), width); end_h = min(max(end_h + y1, 0), height); end_w = min(max(end_w + x1, 0), width); bool is_empty = (end_h <= start_h) || (end_w <= start_w); float max_val = is_empty ? 
0 : -FLT_MAX; int max_idx = -1; x += ((im_idx * channels + c) * height * width); for (int h = start_h; h < end_h; ++h) { for (int w = start_w; w < end_w; ++w) { const int x_idx = h * width + w; if (x[x_idx] > max_val) { max_val = x[x_idx]; max_idx = x_idx; } } //end w } // end h y[idx] = max_val; mask[idx] = max_idx; } } template<> void ROIPooling<float, CUDAContext>(const float spatial_scale, const int pool_h, const int pool_w, Tensor* x, Tensor* roi, Tensor* mask, Tensor* y) { auto* Xdata = x->data<float, CUDAContext>(); auto* Rdata = roi->data<float, CUDAContext>(); auto* Ydata = y->mutable_data<float, CUDAContext>(); auto* Mdata = mask->mutable_data<int, CUDAContext>(); TIndex channels = x->dim(1), count = y->count(); TIndex height = x->dim(2), width = x->dim(3); _ROIPooling<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, spatial_scale, channels, height, width, pool_h, pool_w, Xdata, Rdata, Mdata, Ydata); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _ROIPoolingGrad(const int count, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const T* dy, const T* roi, const int* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { int w = idx % width; int h = (idx / width) % height; int c = (idx / width / height) % channels; int im_idx = idx / width / height / channels; T diff = 0; for (int n = 0; n < num_rois; ++n) { const T* cur_roi = roi + n * 5; const int im_idx_spec = cur_roi[0]; // ignore wrong im_batch_idx if (im_idx != im_idx_spec) continue; int x1 = round(cur_roi[1] * spatial_scale); int y1 = round(cur_roi[2] * spatial_scale); int x2 = round(cur_roi[3] * spatial_scale); int y2 = round(cur_roi[4] * spatial_scale); const bool is_in = (w >= x1 && w <= x2 && h >= y1 && h <= y2); if (!is_in) continue; int roi_height = max(y2 - y1 + 1, 1); int roi_width = max(x2 - x1 + 1, 1); const float bin_size_h = (float)roi_height / (float)pool_h; const float bin_size_w = (float)roi_width / (float)pool_w; int start_ph = floor((h - y1) / bin_size_h); int start_pw = floor((w - x1) / bin_size_w); int end_ph = ceil((h + 1 - y1) / bin_size_h); int end_pw = ceil((w + 1 - x1) / bin_size_w); start_ph = min(max(start_ph, 0), pool_h); start_pw = min(max(start_pw, 0), pool_w); end_ph = min(max(end_ph, 0), pool_h); end_pw = min(max(end_pw, 0), pool_w); int y_offset = (n * channels + c) * pool_h * pool_w; const T* dy_off = dy + y_offset; const int* mask_off = mask + y_offset; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { int pool_idx = ph * pool_w + pw; if (mask_off[pool_idx] == (h * width + w)) { diff += dy_off[pool_idx]; } } // end pw } // end ph } // end n dx[idx] = diff; } } template<> void ROIPoolingGrad<float, CUDAContext>(const float spatial_scale, const int pool_h, const int pool_w, Tensor* dy, Tensor* roi, Tensor* mask, Tensor* dx) { auto* dYdata = dy->data<float, CUDAContext>(); auto* Rdata = roi->data<float, CUDAContext>(); auto* Mdata = mask->data<int, CUDAContext>(); auto* dXdata = dx->mutable_data<float, CUDAContext>(); TIndex channels = dx->dim(1), count = dx->count(); TIndex height = dx->dim(2), width = dx->dim(3); _ROIPoolingGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, roi->dim(0), spatial_scale, channels, height, width, pool_h, pool_w, dYdata, Rdata, Mdata, dXdata); CUDA_POST_KERNEL_CHECK; } /******************** vision.roi_align ********************/ template <typename T> __global__ void _ROIAlign(const int count, const float 
spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const T* x, const T* roi, T* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { int pw = idx % pool_w; int ph = (idx / pool_w) % pool_h; int c = (idx / pool_w / pool_h) % channels; int n = idx / pool_w / pool_h / channels; roi += n * 5; int roi_batch_ind = roi[0]; T roi_start_w = (roi[1]) * spatial_scale; T roi_start_h = (roi[2]) * spatial_scale; T roi_end_w = (roi[3]) * spatial_scale; T roi_end_h = (roi[4]) * spatial_scale; T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(1)); T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(1)); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pool_h); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pool_w); T hstart = static_cast<T>((ph)* bin_size_h); T wstart = static_cast<T>((pw)* bin_size_w); T hend = static_cast<T>((ph + 1) * bin_size_h); T wend = static_cast<T>((pw + 1) * bin_size_w); hstart = min(max(hstart + roi_start_h, static_cast<T>(0)), static_cast<T>(height)); hend = min(max(hend + roi_start_h, static_cast<T>(0)), static_cast<T>(height)); wstart = min(max(wstart + roi_start_w, static_cast<T>(0)), static_cast<T>(width)); wend = min(max(wend + roi_start_w, static_cast<T>(0)), static_cast<T>(width)); bool is_empty = (hend <= hstart) || (wend <= wstart); T maxval = is_empty ? 0 : -FLT_MAX; int maxidx = -1; int x_idx = 0; x += (roi_batch_ind * channels + c) * height * width; T h_stride = (hend - hstart) / 3.0; T w_stride = (wend - wstart) / 3.0; for (T h = hstart + h_stride; h <= hend - h_stride + 0.01; h += max(h_stride, 0.01)) { for (T w = wstart + w_stride; w <= wend - w_stride + 0.01; w += max(w_stride, 0.01)) { x_idx++; int hlow = min(max(static_cast<int>(floor(h)), 0), height - 1); int hhigh = min(hlow + 1, height - 1); int wleft = min(max(static_cast<int>(floor(w)), 0), width - 1); int wright = min(wleft + 1, width - 1); int topleft = hlow * width + wleft; int topright = hlow * width + wright; int bottomleft = hhigh * width + wleft; int bottomright = hhigh * width + wright; T alpha = (hlow == hhigh) ? static_cast<T>(0.5) : (h - hlow) / (hhigh - hlow); T beta = (wleft == wright) ? 
static_cast<T>(0.5) : (w - wleft) / (wright - wleft); T value = (1 - alpha) * (1 - beta) * x[topleft] + alpha * (1 - beta) * x[bottomleft] + (1 - alpha) * beta * x[topright] + alpha * beta * x[bottomright]; if (value > maxval) { maxval = value; maxidx = x_idx; } } } y[idx] = maxval; mask[idx] = maxidx; } } template<> void ROIAlign<float, CUDAContext>(const float spatial_scale, const int pool_h, const int pool_w, Tensor* x, Tensor* roi, Tensor* mask, Tensor* y) { auto* Xdata = x->data<float, CUDAContext>(); auto* Rdata = roi->data<float, CUDAContext>(); auto* Ydata = y->mutable_data<float, CUDAContext>(); auto* Mdata = mask->mutable_data<float, CUDAContext>(); TIndex channels = x->dim(1), count = y->count(); TIndex height = x->dim(2), width = x->dim(3); _ROIAlign<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, spatial_scale, channels, height, width, pool_h, pool_w, Xdata, Rdata, Mdata, Ydata); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _ROIAlignGrad(const int count, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const T* dy, const T* roi, const T* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { int w = idx % width; int h = (idx / width) % height; int c = (idx / width / height) % channels; int n = idx / width / height / channels; T gradient = 0; for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const T* offset_roi = roi + roi_n * 5; int roi_batch_ind = offset_roi[0]; if (n != roi_batch_ind) continue; T roi_start_w = (offset_roi[1]) * spatial_scale; T roi_start_h = (offset_roi[2]) * spatial_scale; T roi_end_w = (offset_roi[3]) * spatial_scale; T roi_end_h = (offset_roi[4]) * spatial_scale; const bool in_roi = (w > roi_start_w - 1.0 && w < roi_end_w + 1.0 && h > roi_start_h - 1.0 && h < roi_end_h + 1.0); if (!in_roi) continue; int offset = (roi_n * channels + c) * pool_h * pool_w; const T* offset_dy = dy + offset; const T* offset_mask = mask + offset; T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(1)); T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(1)); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pool_h); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pool_w); for (int ph = 0; ph < pool_h; ++ph) { for (int pw = 0; pw < pool_w; ++pw) { T hstart = static_cast<T>((ph)* bin_size_h); T wstart = static_cast<T>((pw)* bin_size_w); T hend = static_cast<T>((ph + 1) * bin_size_h); T wend = static_cast<T>((pw + 1) * bin_size_w); hstart = min(max(hstart + roi_start_h, static_cast<T>(0)), static_cast<T>(height)); hend = min(max(hend + roi_start_h, static_cast<T>(0)), static_cast<T>(height)); wstart = min(max(wstart + roi_start_w, static_cast<T>(0)), static_cast<T>(width)); wend = min(max(wend + roi_start_w, static_cast<T>(0)), static_cast<T>(width)); bool in_bin = (w > wstart - 1.0 && w < wend + 1.0 && h > hstart - 1.0 && h < hend + 1.0); if (!in_bin) continue; const int pool_idx = ph * pool_w + pw; int x_idx = 0; T h_stride = (hend - hstart) / 3.0; T w_stride = (wend - wstart) / 3.0; for (T rh = hstart + h_stride; rh <= hend - h_stride + 0.01; rh += max(h_stride, 0.01)) { for (T rw = wstart + w_stride; rw <= wend - w_stride + 0.01; rw += max(w_stride, 0.01)) { x_idx++; if (offset_mask[pool_idx] != x_idx) continue; int hlow = min(max(static_cast<int>(floor(rh)), 0), height - 1); int hhigh = min(hlow + 1, height - 1); int wleft = min(max(static_cast<int>(floor(rw)), 0), width - 1); int wright = min(wleft + 1, width - 1); if (h != hlow && h != 
hhigh && w != wleft && w != wright) continue; T alpha = (hlow == hhigh) ? static_cast<T>(0.5) : (rh - hlow) / (hhigh - hlow); T beta = (wleft == wright) ? static_cast<T>(0.5) : (rw - wleft) / (wright - wleft); if (h == hlow && w == wleft) gradient += offset_dy[pool_idx] * (1 - alpha) * (1 - beta); else if (h == hlow && w == wright) gradient += offset_dy[pool_idx] * (1 - alpha) * beta; else if (h == hhigh && w == wleft) gradient += offset_dy[pool_idx] * alpha * (1 - beta); else if (h == hhigh && w == wright) gradient += offset_dy[pool_idx] * alpha * beta; } } } } } dx[idx] = gradient; } } template<> void ROIAlignGrad<float, CUDAContext>(const float spatial_scale, const int pool_h, const int pool_w, Tensor* dy, Tensor* roi, Tensor* mask, Tensor* dx) { auto* dYdata = dy->data<float, CUDAContext>(); auto* Rdata = roi->data<float, CUDAContext>(); auto* Mdata = mask->data<float, CUDAContext>(); auto* dXdata = dx->mutable_data<float, CUDAContext>(); TIndex channels = dx->dim(1), count = dx->count(); TIndex height = dx->dim(2), width = dx->dim(3); _ROIAlignGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, roi->dim(0), spatial_scale, channels, height, width, pool_h, pool_w, dYdata, Rdata, Mdata, dXdata); CUDA_POST_KERNEL_CHECK; } } // namespace kernel } // namespace dragon #endif // WITH_CUDA
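// ---------------------------------------------------------------------------
// Illustrative note on the _ROIAlign / _ROIAlignGrad kernels above: each
// bilinear sample is weighted by the fractional offsets
//   alpha = (h - hlow) / (hhigh - hlow)    (vertical fraction)
//   beta  = (w - wleft) / (wright - wleft) (horizontal fraction)
// so that
//   value = (1-alpha)(1-beta)*x[topleft] + alpha(1-beta)*x[bottomleft]
//         + (1-alpha)*beta*x[topright]   + alpha*beta*x[bottomright].
// With hypothetical sample coordinates h = 2.4 (between rows 2 and 3) and
// w = 5.75 (between cols 5 and 6): alpha = 0.4, beta = 0.75, giving weights
// 0.15 / 0.10 / 0.45 / 0.30 for topleft / bottomleft / topright / bottomright,
// which sum to 1. _ROIAlignGrad replays the same sampling loop, matches the
// stored sample index in `mask`, and routes dy back through exactly these
// four weights.
// ---------------------------------------------------------------------------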
02aed3b0f240d7fef138cdea037a6ef732c801a7.cu
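// ---------------------------------------------------------------------------
// Launch convention used throughout this file: every kernel is dispatched as
//   _Kernel<T> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ...);
// and iterates with CUDA_KERNEL_LOOP(idx, count). A minimal sketch of these
// helpers, assuming Caffe-style definitions in utils/cuda_device.h (the actual
// header may choose different constants):
//
//   const int CUDA_NUM_THREADS = 1024;  // assumed block size; 512 is also common
//   inline int GET_BLOCKS(const int N) {
//       return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
//   }
//   #define CUDA_KERNEL_LOOP(i, n) \
//       for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
//            i < (n); i += blockDim.x * gridDim.x)
//
// i.e. a grid-stride loop, so each kernel stays correct even when the grid is
// smaller than `count`. CUDA_POST_KERNEL_CHECK is assumed to wrap an error
// check such as cudaPeekAtLastError() after the launch.
// ---------------------------------------------------------------------------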
#ifdef WITH_CUDA #include <cmath> #include "core/context_cuda.h" #include "core/tensor.h" #include "utils/cuda_device.h" #include "utils/op_kernel.h" #include "utils/math_functions.h" namespace dragon { namespace kernel { template <typename T> __global__ void _Empty() { } template<> void Empty<float, CUDAContext>() { _Empty<float> << <1, 1 >> >(); CUDA_POST_KERNEL_CHECK; } template<> void Empty<float16, CUDAContext>() { _Empty<float16> << <1, 1 >> >(); CUDA_POST_KERNEL_CHECK; } /******************** activation.dropout ********************/ template<typename T> __global__ void _Dropout(const int count, const uint32_t thresh, const T scale, const T* x, const uint32_t* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] * (mask[idx] > thresh) * scale; } } template<> void Dropout<float, CUDAContext>(const int count, float prob, float scale, const float* x, uint32_t* mask, float* y, CUDAContext* context) { uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob); math::RandomUniform<uint32_t, CUDAContext>(count, float(0), float(UINT_MAX), mask); _Dropout<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, thresh, scale, x, mask, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _DropoutGrad(const int count, const uint32_t thresh, const T scale, const T* dy, const uint32_t* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * (mask[idx] > thresh) * scale; } } template<> void DropoutGrad<float, CUDAContext>(const int count, float prob, float scale, const float* dy, const uint32_t* mask, float* dx) { uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob); _DropoutGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, thresh, scale, dy, mask, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.prelu ********************/ template <typename T> __global__ void _PRelu(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[0]; } } template <typename T> __global__ void _PReluNCHW(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = (idx / dim) % channels; y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[c]; } } template <typename T> __global__ void _PReluNHWC(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % channels; y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[c]; } } template<> void PRelu<float, CUDAContext>(const int count, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* x, const float* w, float* y) { if (channel_shared) { _PRelu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else { if (data_format == "NCHW") { _PReluNCHW<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else if (data_format == "NHWC") { _PReluNHWC<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _PReluGrad(const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[0]); } } template <typename T> __global__ void _PReluGradNCHW(const int count, const int 
channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = (idx / dim) % channels; dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]); } } template <typename T> __global__ void _PReluGradNHWC(const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % channels; dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]); } } template<> void PReluGrad<float, CUDAContext>(const int count, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* dy, const float* x, const float* w, float* dx) { if (channel_shared) { _PReluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else { if (data_format == "NCHW") { _PReluGradNCHW<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else if (data_format == "NHWC") { _PReluGradNHWC<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _PReluWGradBcast(const int count, const int rows, const int row_offset, const T* dy, const T* x, T* bcast_dw) { CUDA_KERNEL_LOOP(idx, count) { bcast_dw[idx] = dy[idx] * x[idx] * (x[idx] <= 0); for (int n = 1; n < rows; n++) { const int cur_idx = idx + n * row_offset; bcast_dw[idx] += dy[cur_idx] * x[cur_idx] * (x[cur_idx] <= 0); } } } template<> void PReluWGrad<float, CUDAContext>(const int rows, const int row_offset, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* dy, const float* x, const float* multiplier, float* bcast_dw, float* dw) { const int cdim = channels * dim; _PReluWGradBcast<float> << < GET_BLOCKS(cdim), CUDA_NUM_THREADS >> >(cdim, rows, row_offset, dy, x, bcast_dw); CUDA_POST_KERNEL_CHECK; if (channel_shared) { float w_sum = math::Dot<float, CUDAContext>(channels * dim, bcast_dw, multiplier); math::AddScalar<float, CUDAContext>(1, w_sum, dw); } else { if (data_format == "NCHW") { math::Gemv<float, CUDAContext>(CblasNoTrans, channels, dim, 1.0, bcast_dw, multiplier, 1.0, dw); } else if (data_format == "NHWC") { math::Gemv<float, CUDAContext>(CblasTrans, dim, channels, 1.0, bcast_dw, multiplier, 1.0, dw); } else LOG(FATAL) << "Unknown data format: " << data_format; } } /******************** activation.elu ********************/ template <typename T> __global__ void _Elu(const int count, const T* x, const float alpha, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? 
x[idx] : alpha * (std::exp(x[idx]) - 1); } } template<> void Elu<float, CUDAContext>(const int count, const float* x, const float alpha, float* y) { _Elu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, alpha, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _EluGrad(const int count, const T* dy, const T* y, const float alpha, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((y[idx] > 0) + (alpha + y[idx]) * (y[idx] <= 0)); } } template<> void EluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, const float alpha, float* dx) { _EluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, alpha, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.relu ********************/ template <typename T> __global__ void _Relu(const int count, const T* x, const float slope, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? x[idx] : x[idx] * slope; } } template<> void Relu<float, CUDAContext>(const int count, const float* x, const float slope, float* y) { _Relu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, slope, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _ReluHalf(const int count, const half* x, const float slope, half* y) { const half kSlope = __float2half(slope); const half kZero = __float2half(0.0); CUDA_KERNEL_LOOP(idx, count) { #if __CUDA_ARCH__ >= 530 y[idx] = __hgt(x[idx], kZero) ? x[idx] : __hmul(x[idx], kSlope); #endif } } template<> void Relu<float16, CUDAContext>(const int count, const float16* x, const float slope, float16* y) { _ReluHalf<half> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(x), slope, reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _ReluGrad(const int count, const T* dy, const T* y, const float slope, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((y[idx] > 0) + slope * (y[idx] <= 0)); } } template<> void ReluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, const float slope, float* dx) { _ReluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, slope, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.selu ********************/ template <typename T> __global__ void _SElu(const int count, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? 1.0507 * x[idx] : 1.7581 * (std::exp(x[idx]) - 1); } } template<> void SElu<float, CUDAContext>(const int count, const float* x, float* y) { _SElu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SEluGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = y[idx] > 0 ? 
1.0507 * dy[idx] : (1.7581 + y[idx]) * dy[idx]; } } template<> void SEluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _SEluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.sigmoid ********************/ template <typename T> __device__ T _SigmoidUnit(const T x) { return T(1) / (T(1) + exp(-x)); } template <typename T> __global__ void _Sigmoid(const int n, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, n) { y[idx] = _SigmoidUnit<T>(x[idx]); } } template<> void Sigmoid<float, CUDAContext>(const int count, const float* x, float* y) { _Sigmoid<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SigmoidGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * y[idx] * (1 - y[idx]); } } template<> void SigmoidGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _SigmoidGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.softmax ********************/ template <typename T> __global__ void _SoftmaxMaxClass(const int outer_dim, const int classes, const int inner_dim, const T* x, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T max_val = -FLT_MAX; for (int c = 0; c < classes; c++) max_val = max(x[(o_idx * classes + c) * inner_dim + i_idx], max_val); scale[idx] = max_val; } } template <typename T> __global__ void _SoftmaxSubtract(const int count, const int classes, const int inner_dim, const T* scale, T* y) { CUDA_KERNEL_LOOP(idx, count) { int o_idx = idx / inner_dim / classes; int i_idx = idx % inner_dim; y[idx] -= scale[o_idx * inner_dim + i_idx]; } } template <typename T> __global__ void _SoftmaxExp(const int count, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = std::exp(y[idx]); } } template <typename T> __global__ void _SoftmaxSumClass(const int outer_dim, const int classes, const int inner_dim, const T* y, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T sum = 0; for (int c = 0; c < classes; c++) sum += y[(o_idx * classes + c) * inner_dim + i_idx]; scale[idx] = sum; } } template <typename T> __global__ void _SoftmaxDiv(const int count, const int classes, const int inner_dim, const T* scale, T* y) { CUDA_KERNEL_LOOP(idx, count) { int o_idx = idx / inner_dim / classes; int i_idx = idx % inner_dim; y[idx] /= scale[o_idx * inner_dim + i_idx]; } } template<> void Softmax<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* sum_multiplier, const float* x, float* scale, float* y, CUDAContext* context) { const int num_preds = inner_dim * outer_dim; _SoftmaxMaxClass<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, x, scale); _SoftmaxSubtract<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, y); _SoftmaxExp<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, y); _SoftmaxSumClass<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, y, scale); _SoftmaxDiv<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SoftmaxDot(const int 
outer_dim, const int classes, const int inner_dim, const T* dy, const T* y, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T dot = 0; for (int c = 0; c < classes; c++) dot += (y[(o_idx * classes + c) * inner_dim + i_idx] * dy[(o_idx * classes + c) * inner_dim + i_idx]); scale[idx] = dot; } } template<> void SoftmaxGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* sum_multiplier, const float* dy, const float* y, float* scale, float* dx) { const int num_preds = inner_dim * outer_dim; _SoftmaxDot<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, dy, y, scale); _SoftmaxSubtract<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, dx); math::Mul<float, CUDAContext>(count, dx, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.tanh ********************/ template <typename T> __global__ void _Tanh(const int count, const T* x, T* y) { CUDA_KERNEL_LOOP(i, count) { y[i] = std::tanh(x[i]); } } template<> void Tanh<float, CUDAContext>(const int count, const float* x, float* y) { _Tanh<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _TanhGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(i, count) { dx[i] = dy[i] * (1 - y[i] * y[i]); } } template<> void TanhGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _TanhGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** arithmetic.bias_add ********************/ template <typename T> __global__ void _BiasAdd_NCHW(const int count, const int dim, const int inner_dim, const T* bias, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int bias_idx = (idx / inner_dim) % dim; y[idx] += bias[bias_idx]; } } template <typename T> __global__ void _BiasAdd_NHWC(const int count, const int dim, const int inner_dim, const T* bias, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] += bias[idx % dim]; } } template<> void BiasAdd<float, CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const string& data_format, const float* bias, const float* bias_multiplier, float* y) { if (data_format == "NCHW") { _BiasAdd_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, inner_dim, bias, y); } else if (data_format == "NHWC") { _BiasAdd_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, inner_dim, bias, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** arithmetic.clip ********************/ template <typename T> __global__ void _Clip(const int count, const T low, const T high, const T* x, T* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { mask[idx] = 1.0; if (x[idx] > high || x[idx] < low) mask[idx] = 0.0; y[idx] = x[idx] > high ? high : x[idx]; y[idx] = x[idx] < low ? 
low : x[idx]; } } template <> void Clip<float, CUDAContext>(const int count, const float low, const float high, const float* x, float* mask, float* y) { _Clip<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, low, high, x, mask, y); } /******************** arithmetic.scale ********************/ template <typename T> __global__ void _ScaleWithoutBias(const int n, const T* x, const T* scale, const int scale_dim, const int inner_dim, T* y) { CUDA_KERNEL_LOOP(idx, n) { const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = x[idx] * scale[scale_idx]; } } template <typename T> __global__ void _ScaleWithBias(const int n, const T* x, const T* scale, const T* bias, const int scale_dim, const int inner_dim, T* y) { CUDA_KERNEL_LOOP(idx, n) { const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = x[idx] * scale[scale_idx] + bias[scale_idx]; } } template<> void Scale<float, CUDAContext>(const int axis, Tensor* x, Tensor* gamma, Tensor* beta, Tensor* BMul, Tensor* y) { const int count = x->count(); const int inner_dim = x->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* Xdata = x->data<float, CUDAContext>(); auto* Ydata = y->mutable_data<float, CUDAContext>(); auto* Sdata = gamma->data<float, CUDAContext>(); auto* Bdata = beta != nullptr ? beta->data<float, CUDAContext>() : nullptr; if (Bdata != nullptr) _ScaleWithBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, Xdata, Sdata, Bdata, scale_dim, inner_dim, Ydata); else _ScaleWithoutBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, Xdata, Sdata, scale_dim, inner_dim, Ydata); } #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _ScaleWithoutBiasHalf(const int n, const half* x, const half* scale, const int scale_dim, const int inner_dim, half* y) { CUDA_KERNEL_LOOP(idx, n) { #if __CUDA_ARCH__ >= 530 const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = __hmul(x[idx], scale[scale_idx]); #endif } } template <typename T> __global__ void _ScaleWithBiasHalf(const int n, const half* x, const half* scale, const half* bias, const int scale_dim, const int inner_dim, half* y) { CUDA_KERNEL_LOOP(idx, n) { #if __CUDA_ARCH__ >= 530 const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = __hadd(__hmul(x[idx], scale[scale_idx]), bias[scale_idx]); #endif } } template<> void Scale<float16, CUDAContext>(const int axis, Tensor* x, Tensor* gamma, Tensor* beta, Tensor* BMul, Tensor* y) { const int count = x->count(); const int inner_dim = x->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* Xdata = x->data<float16, CUDAContext>(); auto* Ydata = y->mutable_data<float16, CUDAContext>(); auto* Sdata = gamma->data<float16, CUDAContext>(); auto* Bdata = beta != nullptr ? 
beta->data<float16, CUDAContext>() : nullptr; if (Bdata != nullptr) _ScaleWithBiasHalf<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(Xdata), reinterpret_cast<const half*>(Sdata), reinterpret_cast<const half*>(Bdata), scale_dim, inner_dim, reinterpret_cast<half*>(Ydata)); else _ScaleWithoutBiasHalf<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(Xdata), reinterpret_cast<const half*>(Sdata), scale_dim, inner_dim, reinterpret_cast<half*>(Ydata)); } #endif template <> void ScaleGrad<float, CUDAContext>(const int axis, Tensor* dy, Tensor* gamma, Tensor* dx) { const int count = dx->count(); const int inner_dim = dx->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* dYdata = dy->data<float, CUDAContext>(); auto* dXdata = dx->mutable_data<float, CUDAContext>(); auto* Sdata = gamma->data<float, CUDAContext>(); _ScaleWithoutBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dYdata, Sdata, scale_dim, inner_dim, dXdata); } /******************** cast.float2half ********************/ #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _FloatToHalfKernel(const int count, const float* x, half* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = __float2half(x[idx]); } } template <> void Float2Half<float, CUDAContext>(const int count, const float* x, float16* y) { _FloatToHalfKernel<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif /******************** control_flow.compare ********************/ template <typename T> __global__ void _Equal(const int count, const T* a, const T* b, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = fabs(a[idx] - b[idx]) < FLT_EPSILON ? 1.0 : 0.0; } } template <> void Equal<float, CUDAContext>(const int count, const float* a, const float* b, float* y) { _Equal<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, a, b, y); CUDA_POST_KERNEL_CHECK; } /******************** loss.l1_loss ********************/ template <typename T> __global__ void _AbsGrad(const int count, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const T val = dy[idx]; // val > 0: 1 | val == 0: 0 | val < 0: -1 dx[idx] = (val > T(0)) - (val < T(0)); } } template<> void AbsGrad<float, CUDAContext>(const int count, const float* dy, float* dx) { _AbsGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** loss.sigmoid_cross_entropy ********************/ template <typename T> __global__ void _SigmoidCrossEntropy(const int count, const T* x, const T* targets, T* loss) { CUDA_KERNEL_LOOP(idx, count) { loss[idx] = std::log(1 + std::exp(x[idx] - 2 * x[idx] * (x[idx] >= 0))) + x[idx] * ((x[idx] >= 0) - targets[idx]); } } template <> void SigmoidCrossEntropy<float, CUDAContext>(const int count, const float* x, const float* targets, float* loss) { _SigmoidCrossEntropy<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, targets, loss); CUDA_POST_KERNEL_CHECK; } /******************** loss.smooth_l1_loss ********************/ template <typename T> __global__ void _SmoothL1(const int count, const float sigma2, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const T val = x[idx]; const T abs_val = abs(val); if (abs_val < 1.0 / sigma2) y[idx] = 0.5 * val * val * sigma2; else y[idx] = abs_val - 0.5 / sigma2; } } template<> void SmoothL1<float, CUDAContext>(const int count, const float sigma2, const float* x, float* y) { _SmoothL1<float> << 
<GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, sigma2, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SmoothL1Grad(const int count, const float sigma2, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const T val = dy[idx]; const T abs_val = abs(val); if (abs_val < 1.0 / sigma2) dx[idx] = val * sigma2; // val > 0: 1 | val == 0: 0 | val < 0: -1 else dx[idx] = (val > T(0)) - (val < T(0)); } } template<> void SmoothL1Grad<float, CUDAContext>(const int count, const float sigma2, const float* dy, float* dx) { _SmoothL1Grad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, sigma2, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** loss.softmax_cross_entropy ********************/ template <typename T> __global__ void _SoftmaxCrossEntropy(const int count, const T* prob, const T* target, T* loss) { CUDA_KERNEL_LOOP(idx, count) { loss[idx] = -target[idx] * log(max(prob[idx], FLT_MIN)); } } template <> void SoftmaxCrossEntropy<float, CUDAContext>(const int count, const float* prob, const float* target, float* loss) { _SoftmaxCrossEntropy<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, prob, target, loss); CUDA_POST_KERNEL_CHECK; } /******************** loss.sparse_softmax_cross_entropy ********************/ template <typename T> __global__ void _SparseSoftmaxCrossEntropy(const int count, const T* prob, const T* labels, T* loss, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) { if (label == ignores[k]) { loss[idx] = valid[idx] = 0; break; } } if (k == ignore_num) { loss[idx] = -log(max(prob[(o_idx * classes + label) * inner_dim + i_idx], FLT_MIN)); valid[idx] = 1; } } } template <> void SparseSoftmaxCrossEntropy<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* prob, const float* labels, float* loss, float* valid, Tensor* ignore) { const int* ignores = ignore->count() > 0 ? ignore->data<int, CUDAContext>() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropy<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, prob, labels, loss, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SparseSoftmaxCrossEntropyGrad(const int count, const T* prob, const T* labels, T* dx, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) if (label == ignores[k]) break; if (k != ignore_num) { for (int c = 0; c < classes; c++) dx[(o_idx * classes + c) * inner_dim + i_idx] = 0; valid[idx] = 0; } else { dx[(o_idx * classes + label) * inner_dim + i_idx] -= 1; valid[idx] = 1; } } } template<> void SparseSoftmaxCrossEntropyGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* prob, const float* labels, float* valid, Tensor* ignore, float* dXdata) { const int* ignores = ignore->count() > 0 ? 
ignore->data <int, CUDAContext >() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropyGrad<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, prob, labels, dXdata, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } /******************** loss.sparse_softmax_focal_loss ********************/ template <typename T> __global__ void _SparseSoftmaxFocalScale(const int count, const float gamma, const T* prob, T* scale) { CUDA_KERNEL_LOOP(idx, count) { scale[idx] = std::pow((1.0f - prob[idx]), gamma); } } template <typename T> __global__ void _SparseSoftmaxFocalLoss(const int count, const float pos_alpha, const float neg_alpha, const int neg_id, T* scale, const T* prob, const T* labels, T* loss, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) { if (label == ignores[k]) { loss[idx] = valid[idx] = 0; break; } } if (k == ignore_num) { const int t_ = (o_idx * classes + label) * inner_dim + i_idx; scale[t_] = label > neg_id ? pos_alpha * scale[t_] : neg_alpha * scale[t_]; loss[idx] = -scale[t_] * std::log(max(prob[t_], FLT_MIN)); valid[idx] = label > neg_id ? 1 : 0; } } } template <> void SparseSoftmaxFocalLoss<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const float* prob, const float* labels, float* scale, float* loss, float* valid, Tensor* ignore) { const int* ignores = ignore->count() > 0 ? ignore->data<int, CUDAContext>() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxFocalScale<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, gamma, prob, scale); _SparseSoftmaxFocalLoss<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, pos_alpha, neg_alpha, neg_id, scale, prob, labels, loss, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SparseSoftmaxFocalLossGrad(const int count, const float gamma, const int neg_id, const float eps, const T* scale, const T* prob, const T* labels, T* dx, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) if (label == ignores[k]) break; if (k != ignore_num) { for (int c = 0; c < classes; c++) dx[(o_idx * classes + c) * inner_dim + i_idx] = 0; valid[idx] = 0; } else { const int t_ = (o_idx * classes + label) * inner_dim + i_idx; T grad = -gamma * (scale[t_] / max((1.0f - prob[t_]), eps)) * std::log(max(prob[t_], FLT_MIN)) * prob[t_] + scale[t_]; for (int c = 0; c < classes; c++) { const int i_ = (o_idx * classes + c) * inner_dim + i_idx; if (c == label) { dx[i_] = grad * (prob[t_] - 1); } else { dx[i_] = grad * prob[i_]; } } valid[idx] = label > neg_id ? 
1 : 0; } } } template<> void SparseSoftmaxFocalLossGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float gamma, const int neg_id, const float eps, const float* scale, const float* prob, const float* labels, float* valid, Tensor* ignore, float* dXdata) { const int* ignores = ignore->count() > 0 ? ignore->data <int, CUDAContext >() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxFocalLossGrad<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, gamma, neg_id, eps, scale, prob, labels, dXdata, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } /******************** misc.image_data ********************/ template <typename Tx, typename Ty> __global__ void _ImageData_NCHW(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; Ty raw_value = x[((n * H + h) * W + w) * C + c]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = raw_value; } } template <typename Tx, typename Ty> __global__ void _ImageData_NHWC(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; Ty raw_value = x[idx]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = raw_value; } } template <typename Tx, typename Ty> __global__ void _ImageDataHalf_NCHW(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; float raw_value = x[((n * H + h) * W + w) * C + c]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = __float2half(raw_value); } } template <typename Tx, typename Ty> __global__ void _ImageDataHalf_NHWC(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; float raw_value = x[idx]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = __float2half(raw_value); } } template <> void ImageData<float, float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float* y) { if (data_format == "NCHW") { _ImageData_NCHW<float, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<float, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template <> void ImageData<uint8_t, float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const 
uint8_t* x, float* y) { if (data_format == "NCHW") { _ImageData_NCHW<uint8_t, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<uint8_t, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void ImageData<float, float16, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float16* y) { if (data_format == "NCHW") { _ImageDataHalf_NCHW<float, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else if (data_format == "NHWC") { _ImageDataHalf_NHWC<float, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template <> void ImageData<uint8_t, float16, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const uint8_t* x, float16* y) { if (data_format == "NCHW") { _ImageDataHalf_NCHW<uint8_t, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else if (data_format == "NHWC") { _ImageDataHalf_NHWC<uint8_t, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } #endif /******************** ndarray.argmax ********************/ template <typename T> __global__ void _Arange(const int count, const int start, const int step, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = start + idx * step; } } template<> void Arange<float, CUDAContext>(const int count, const int start, const int step, float* y) { _Arange<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, start, step, y); CUDA_POST_KERNEL_CHECK; } template<> void Arange<int, CUDAContext>(const int count, const int start, const int step, int* y) { _Arange<int> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, start, step, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.argmax ********************/ template <typename T> __global__ void _Argmax(const int count, const int axis_dim, const int inner_dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { T max_val = -FLT_MAX; int max_idx = -1; for (int j = 0; j < axis_dim; ++j) { const T val = x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; if (val > max_val) { max_val = val; max_idx = j; } } y[idx] = max_idx; } } template<> void Argmax<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const int top_k, const float* x, float* y) { CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA"; _Argmax<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.argmin ********************/ template <typename T> __global__ void _Argmin(const int count, const int axis_dim, const int inner_dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { T min_val = FLT_MAX; int min_idx = -1; for (int j = 0; j < 
axis_dim; ++j) { const T val = x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; if (val < min_val) { min_val = val; min_idx = j; } } y[idx] = min_idx; } } template<> void Argmin<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const int top_k, const float* x, float* y) { CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA"; _Argmin<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.at ********************/ template <typename T> __global__ void _CanonicalAxis(const int count, const int dim, T* y) { CUDA_KERNEL_LOOP(idx, count) { if (y[idx] < 0) y[idx] += dim; } } template <> void CanonicalAxis<float, CUDAContext>(const int count, const int dim, float* y) { _CanonicalAxis<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _At(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const T* indices, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } template <> void At<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const float* indices, const float* x, float* y, CUDAContext* context) { _At<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _AtGrad(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const T* indices, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; atomicAdd(dx + x_idx, dy[idx]); } } template <> void AtGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const float* indices, const float* dy, float* dx, CUDAContext* context) { _AtGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.concat ********************/ template <typename T> __global__ void _Concat(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = x_concat_dim * inner_dim; const int outer_idx = idx / tmp; const int concat_idx = idx % tmp; const int y_idx = (outer_idx * y_concat_dim + concat_offset) * inner_dim + concat_idx; y[y_idx] = x[idx]; } } template <> void Concat<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float* x, float* y, CUDAContext* context) { _Concat<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, 
x_concat_dim, y_concat_dim, concat_offset, x, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void Concat<float16, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float16* x, float16* y, CUDAContext* context) { _Concat<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _ConcatGrad(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = x_concat_dim * inner_dim; const int outer_idx = idx / tmp; const int concat_idx = idx % tmp; const int y_idx = (outer_idx * y_concat_dim + concat_offset) * inner_dim + concat_idx; dx[idx] = dy[y_idx]; } } template <> void ConcatGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float* dy, float* dx, CUDAContext* context) { _ConcatGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, dy, dx); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void ConcatGrad<float16, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float16* dy, float16* dx, CUDAContext* context) { _ConcatGrad<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(dx)); CUDA_POST_KERNEL_CHECK; } #endif /******************** ndarray.crop ********************/ template<typename T> __global__ void _Crop1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; y[idx] = x[(o * dim + ex_d + start) * inner_dim + i]; } } template<> void Crop1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const float* x, float* y, CUDAContext* context) { _Crop1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, start, x, y); CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _Crop1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int d = (idx / inner_dim) % dim; const int o = idx / inner_dim / dim; if (d >= start && d < end) dx[idx] = dy[(o * ex_dim + d - start) * inner_dim + i]; } } template<> void Crop1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const float* dy, float* dx, CUDAContext* context) { _Crop1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, start, end, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.pad ********************/ template <typename T> __global__ void _ConstPad1D(const int count, const int dim, const int ex_dim, const int 
inner_dim, const int pad_l, const T value, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = ex_d - pad_l; y[idx] = (d < 0 || d >= dim) ? value : x[(o * dim + d) * inner_dim + i]; } } template <> void ConstPad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float value, const float* x, float* y, CUDAContext* context) { _ConstPad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, value, x, y); } template <typename T> __global__ void _ReflectPad1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; int d = ex_d - pad_l; d = max(d, -d); d = min(d, 2 * dim - d - 2); y[idx] = x[(o * dim + d) * inner_dim + i]; } } template <> void ReflectPad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* x, float* y, CUDAContext* context) { _ReflectPad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, x, y); } template <typename T> __global__ void _EdgePad1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = min(dim - 1, max(ex_d - pad_l, 0)); y[idx] = x[(o * dim + d) * inner_dim + i]; } } template <> void EdgePad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* x, float* y, CUDAContext* context) { _EdgePad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, x, y); } template <typename T> __global__ void _ConstPad1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % dim + pad_l; const int o = idx / inner_dim / dim; dx[idx] = dy[(o * ex_dim + ex_d) * inner_dim + i]; } } template <> void ConstPad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, CUDAContext* context) { _ConstPad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } template <typename T> __global__ void _ReflectPad1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; int d = ex_d - pad_l; d = max(d, -d); d = min(d, 2 * dim - d - 2); atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]); } } template <> void ReflectPad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx) { _ReflectPad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } template <typename T> __global__ void _EdgePad1DGrad(const int count, const int dim, const 
int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = min(dim - 1, max(ex_d - pad_l, 0)); atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]); } } template <> void EdgePad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, CUDAContext* context) { _EdgePad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } /******************** ndarray.one_hot ********************/ template <typename T> __global__ void _OneHot(const int count, const int depth, const int on_value, const float* x, float* y) { CUDA_KERNEL_LOOP(idx, count) { const int val = x[idx]; y[idx * depth + val] = on_value; } } template <> void OneHot<float, CUDAContext>(const int count, const int depth, const int on_value, const float* x, float* y) { _OneHot<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, depth, on_value, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.reduce ********************/ template <typename T> __global__ void _Sum(const int count, const int axis_dim, const int inner_dim, const T* x, float* y) { CUDA_KERNEL_LOOP(idx, count) { T sum_val = 0.0; for (int j = 0; j < axis_dim; j++) sum_val += x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; y[idx] = sum_val; } } template<> void Sum<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const float* x, float* y) { _Sum<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SumGrad(const int count, const int axis_dim, const int inner_dim, const T coeff, const T* dy, float* dx) { CUDA_KERNEL_LOOP(idx, count) { for (int j = 0; j < axis_dim; j++) dx[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim] = dy[idx] * coeff; } } template<> void SumGrad<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const float coeff, const float* dy, float* dx) { _SumGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, coeff, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.repeat ********************/ template <typename T> __global__ void _Repeat(const int count, const int inner_dim, const int repeats, const int dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % inner_dim; const int b = (idx / inner_dim / repeats) % dim; const int n = idx / inner_dim / repeats / dim; const int x_idx = (n * dim + b) * inner_dim + d; y[idx] = x[x_idx]; } } template <> void Repeat<float, CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const int repeats, const float* x, float* y, CUDAContext* context) { _Repeat<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, inner_dim, repeats, dim, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _RepeatGrad(const int count, const int inner_dim, const int repeats, const int dim, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % inner_dim; const int b = (idx / inner_dim) % dim; const int n = idx / inner_dim / dim; T gradient = 0; for (int t = 0; t < repeats; t++) gradient += dy[(((n * dim + b) * repeats) + t) * inner_dim + d]; dx[idx] = gradient; } } template <> void RepeatGrad<float, 
CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const int repeats, const float* dy, float* dx, CUDAContext* context) { _RepeatGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, inner_dim, repeats, dim, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.slice ********************/ template <typename T> __global__ void _Slice(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = y_slice_dim * inner_dim; const int outer_idx = idx / tmp; const int slice_idx = idx % tmp; const int x_idx = (outer_idx * x_slice_dim + slice_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } template <> void Slice<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const float* x, float* y, CUDAContext* context) { _Slice<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, slice_offset, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SliceGrad(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = y_slice_dim * inner_dim; const int outer_idx = idx / tmp; const int slice_idx = idx % tmp; const int x_idx = (outer_idx * x_slice_dim + slice_offset) * inner_dim + slice_idx; dx[x_idx] = dy[idx]; } } template <> void SliceGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const float* dy, float* dx, CUDAContext* context) { _SliceGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, slice_offset, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.tile ********************/ template <typename T> __global__ void _Tile(const int count, const int ex_inner_dim, const int multiple, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % ex_inner_dim; const int n = idx / ex_inner_dim / multiple; const int x_idx = n * ex_inner_dim + d; y[idx] = x[x_idx]; } } template <> void Tile<float, CUDAContext>(const int count, const int outer_dim, const int ex_inner_dim, const int multiple, const float* x, float* y, CUDAContext* context) { _Tile<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ex_inner_dim, multiple, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _TileGrad(const int count, const int ex_inner_dim, const int multiple, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % ex_inner_dim; const int n = idx / ex_inner_dim; T gradient = 0; for (int t = 0; t < multiple; t++) gradient += dy[(n * multiple + t) * ex_inner_dim + d]; dx[idx] = gradient; } } template <> void TileGrad<float, CUDAContext>(const int count, const int outer_dim, const int ex_inner_dim, const int multiple, const float* dy, float* dx, CUDAContext* context) { _TileGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ex_inner_dim, multiple, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.transpose ********************/ template <typename T> __global__ void _Transpose(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const T* x, T* y) 
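// _Transpose gathers y[idx] = x[x_idx]: the output linear index is decomposed
// digit-by-digit with new_steps (output strides), and each digit is re-weighted
// by old_steps[order[j]], the stride of the matching input axis.
// Worked example with illustrative values (shapes assumed here for exposition only):
//   x of shape (2, 3, 4)  -> old_steps = {12, 4, 1}
//   order = {2, 0, 1}     -> y of shape (4, 2, 3), new_steps = {6, 3, 1}
//   idx = 7: j=0: k=2, 7/6 = 1, x_idx += 1*1;  remainder = 1
//            j=1: k=0, 1/3 = 0, x_idx += 0*12; remainder = 1
//            j=2: k=1, 1/1 = 1, x_idx += 1*4
//   so y[7] (= y[1][0][1]) reads x[5] (= x[0][1][1]).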
{ CUDA_KERNEL_LOOP(idx, count) { int x_idx = 0, y_idx = idx; for (int j = 0; j < ndim; ++j) { int k = order[j]; x_idx += (y_idx / new_steps[j]) * old_steps[k]; y_idx %= new_steps[j]; } y[idx] = x[x_idx]; } } template <> void Transpose<float, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float* x, float* y) { _Transpose<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, x, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void Transpose<float16, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float16* x, float16* y) { _Transpose<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _TransposeGrad(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { int x_idx = 0, y_idx = idx; for (int j = 0; j < ndim; ++j) { int k = order[j]; x_idx += (y_idx / new_steps[j]) * old_steps[k]; y_idx %= new_steps[j]; } dx[x_idx] = dy[idx]; } } template <> void TransposeGrad<float, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float* dy, float* dx) { _TransposeGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, dy, dx); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void TransposeGrad<float16, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float16* dy, float16* dx) { _TransposeGrad<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(dx)); CUDA_POST_KERNEL_CHECK; } #endif /******************** recurrent.lstm_uint ********************/ template <typename T> __global__ void _LSTMUnitAct(const int count, const int channels, const int g_offset, const int x_offset, const T* x, T* x_act) { CUDA_KERNEL_LOOP(idx, count) { const int ch_4 = idx % x_offset; if (ch_4 < g_offset) x_act[idx] = _SigmoidUnit<float>(x[idx]); else x_act[idx] = std::tanh(x[idx]); } } template <typename T> __global__ void _LSTMUnit(const int count, const int channels, const int o_offset, const int g_offset, const int x_offset, const T* c_1, T* x_act, const T* cont, T* c, T* h) { CUDA_KERNEL_LOOP(idx, count) { const int n = idx / channels; const int ch = idx % channels; T* x_act_ = x_act + n * x_offset; const T i = x_act_[ch]; if (cont != nullptr && cont[n] != T(1)) x_act_[channels + ch] *= cont[n]; const T f = x_act_[channels + ch]; const T o = x_act_[o_offset + ch]; const T g = x_act_[g_offset + ch]; const T c_ = c[idx] = f * c_1[idx] + i * g; h[idx] = o * std::tanh(c_); } } template <> void LSTMUnit<float, CUDAContext>(const int count, const int num, const int channels, const float* c_1, const float* x, const float* cont, float* x_act, float* c, float* h) { const int o_offset = 2 * channels, g_offset = 3 * channels; const int x_offset = 4 * channels, y_count = count / 4; _LSTMUnitAct<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, g_offset, x_offset, x, x_act); _LSTMUnit<float> << <GET_BLOCKS(y_count), CUDA_NUM_THREADS >> >(y_count, channels, o_offset, g_offset, x_offset, c_1, x_act, 
cont, c, h); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _LSTMUnitGrad(const int count, const int channels, const int o_offset, const int g_offset, const int x_offset, const T* c_1, const T* x_act, const T* c, const T* dc, const T* dh, T* dc_1, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int n = idx / channels; const int ch = idx % channels; const T* x_act_ = x_act + n * x_offset; T* dx_ = dx + n * x_offset; const T i = x_act_[ch]; const T f = x_act_[channels + ch]; const T o = x_act_[o_offset + ch]; const T g = x_act_[g_offset + ch]; T* p_di = dx_ + ch; T* p_df = dx_ + channels + ch; T* p_do = dx_ + o_offset + ch; T* p_dg = dx_ + g_offset + ch; const T tanh_c_t = tanh(c[idx]); const T dc_1_sum_term = dh[idx] * o * (1 - tanh_c_t * tanh_c_t) + dc[idx]; dc_1[idx] = dc_1_sum_term * f; *p_di = dc_1_sum_term * g; *p_df = dc_1_sum_term * c_1[idx]; *p_do = dh[idx] * tanh_c_t; *p_dg = dc_1_sum_term * i; } } template <typename T> __global__ void _LSTMUnitGradAct(const int count, const int channels, const int g_offset, const int x_offset, const T* x_act, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int ch_4 = idx % x_offset; const T x_act_ = x_act[idx]; if (ch_4 < g_offset) dx[idx] = dx[idx] * x_act_ * (T(1) - x_act_); else dx[idx] = dx[idx] * (T(1) - x_act_ * x_act_); } } template <> void LSTMUnitGrad<float, CUDAContext>(const int count, const int num, const int channels, const float* c_1, const float* x_act, const float* c, const float* dc, const float* dh, float* dc_1, float* dx) { const int o_offset = 2 * channels, g_offset = 3 * channels; const int x_offset = 4 * channels, y_count = count / 4; _LSTMUnitGrad<float> << <GET_BLOCKS(y_count), CUDA_NUM_THREADS >> >(y_count, channels, o_offset, g_offset, x_offset, c_1, x_act, c, dc, dh, dc_1, dx); _LSTMUnitGradAct<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, g_offset, x_offset, x_act, dx); CUDA_POST_KERNEL_CHECK; } /******************** update.adam_update ********************/ template <typename T> __global__ void _AdamUpdate(const int n, T* g, T* m, T* v, const T beta1, const T beta2, const T eps, const T lr) { CUDA_KERNEL_LOOP(i, n) { T gi = g[i]; T mi = m[i] = m[i] * beta1 + gi * (1 - beta1); T vi = v[i] = v[i] * beta2 + gi * gi * (1 - beta2); g[i] = lr * mi / (sqrt(vi) + eps); } } template <> void AdamUpdate<float, CUDAContext>(Tensor* x, Tensor* m, Tensor* v, Tensor* t, const float beta1, const float beta2, const float eps, const float lr) { TIndex count = x->count(); auto* Xdata = x->mutable_data<float, CUDAContext>(); auto* Mdata = m->mutable_data<float, CUDAContext>(); auto* Vdata = v->mutable_data<float, CUDAContext>(); _AdamUpdate<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, Xdata, Mdata, Vdata, beta1, beta2, eps, lr); CUDA_POST_KERNEL_CHECK; } /******************** update.nesterov_update ********************/ template <typename T> __global__ void _NesterovUpdate(const int n, T* g, T* h, const T momentum, const T lr) { CUDA_KERNEL_LOOP(i, n) { T hi = h[i]; T hi_new = h[i] = momentum * hi + lr * g[i]; g[i] = (1 + momentum) * hi_new - momentum * hi; } } template <> void NesterovUpdate<float, CUDAContext>(const int count, float* x, float* h, Tensor* t, const float momentum, const float lr, CUDAContext* ctx) { _NesterovUpdate<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, h, momentum, lr); CUDA_POST_KERNEL_CHECK; } /******************** update.rmsprop_update ********************/ template <typename T> __global__ void _RMSPropUpdate(const int n, T* g, T* h, const T 
decay, const T eps, const T lr) { CUDA_KERNEL_LOOP(i, n) { T gi = g[i]; T hi = h[i] = decay * h[i] + (1 - decay) * gi * gi; g[i] = lr * g[i] / (sqrt(hi) + eps); } } template <> void RMSPropUpdate<float, CUDAContext>(const int count, float* x, float* h, Tensor* t, const float decay, const float eps, const float lr) { _RMSPropUpdate<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, h, decay, eps, lr); CUDA_POST_KERNEL_CHECK; } /******************** vision.bilinear_resize ********************/ /* Bilinear interpolation: each output pixel blends its 4 nearest input neighbors with weights derived from x_lerp and y_lerp; scale_h/scale_w map output coordinates back to input coordinates (in_size / out_size). */ template <typename T> __global__ void _BilinearResize_NCHW(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NCHT = (n * C + c) * H + top_y_idx; const int NCHB = (n * C + c) * H + bottom_y_idx; const float top_left(x[NCHT * W + left_x_idx]); const float top_right(x[NCHT * W + right_x_idx]); const float bottom_left(x[NCHB * W + left_x_idx]); const float bottom_right(x[NCHB * W + right_x_idx]); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; y[idx] = top + (bottom - top) * y_lerp; } } template <typename T> __global__ void _BilinearResize_NHWC(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ?
ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NHT = n * H + top_y_idx; const int NHB = n * H + bottom_y_idx; const float top_left(x[(NHT * W + left_x_idx) * C + c]); const float top_right(x[(NHT * W + right_x_idx) * C + c]); const float bottom_left(x[(NHB * W + left_x_idx) * C + c]); const float bottom_right(x[(NHB * W + right_x_idx) * C + c]); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; y[idx] = top + (bottom - top) * y_lerp; } } template <> void BilinearResize<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* x, float* y) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; if (data_format == "NCHW") { _BilinearResize_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else if(data_format == "NHWC") { _BilinearResize_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } /* Backward pass: scatter each dy element into the same 4 input neighbors with the matching lerp weights via atomicAdd (dx is zeroed by the launcher). */ template <typename T> __global__ void _BilinearResizeGrad_NCHW(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NCHT = (n * C + c) * H + top_y_idx; const int NCHB = (n * C + c) * H + bottom_y_idx; const float dtop = (1 - y_lerp) * dy[idx]; const float dbottom = y_lerp * dy[idx]; atomicAdd(&dx[NCHT * W + left_x_idx], static_cast<T>((1 - x_lerp) * dtop)); atomicAdd(&dx[NCHT * W + right_x_idx], static_cast<T>(x_lerp * dtop)); atomicAdd(&dx[NCHB * W + left_x_idx], static_cast<T>((1 - x_lerp) * dbottom)); atomicAdd(&dx[NCHB * W + right_x_idx], static_cast<T>(x_lerp * dbottom)); } } template <typename T> __global__ void _BilinearResizeGrad_NHWC(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ?
ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NHT = n * H + top_y_idx; const int NHB = n * H + bottom_y_idx; const float dtop = (1 - y_lerp) * dy[idx]; const float dbottom = y_lerp * dy[idx]; atomicAdd(&dx[(NHT * W + left_x_idx) * C + c], static_cast<T>((1 - x_lerp) * dtop)); atomicAdd(&dx[(NHT * W + right_x_idx) * C + c], static_cast<T>(x_lerp * dtop)); atomicAdd(&dx[(NHB * W + left_x_idx) * C + c], static_cast<T>((1 - x_lerp) * dbottom)); atomicAdd(&dx[(NHB * W + right_x_idx) * C + c], static_cast<T>(x_lerp * dbottom)); } } template <> void BilinearResizeGrad<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* dy, float* dx) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; math::Set<float, CUDAContext>(N * C * H * W, 0, dx); if (data_format == "NCHW") { _BilinearResizeGrad_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else if(data_format == "NHWC") { _BilinearResizeGrad_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } /******************** vision.conv ********************/ template<typename T> __global__ void _Im2Col2d_NCHW(const int count, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* im, T* col) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % col_w; const int h_idx = idx / col_w; const int h = h_idx % col_h; const int im_c = h_idx / col_h; const int c = im_c * kernel_h * kernel_w; const int im_h_off = h * stride_h - pad_h; const int im_w_off = w * stride_w - pad_w; T* col_ptr = col; col_ptr += ((c * col_h + h) * col_w + w); const T* im_ptr = im; im_ptr += ((im_c * H + im_h_off) * W + im_w_off); for (int kh = 0; kh < kernel_h; kh++) { for (int kw = 0; kw < kernel_w; kw++) { const int im_h = kh * dilation_h + im_h_off; const int im_w = kw * dilation_w + im_w_off; *col_ptr = (im_h >= 0 && im_w >= 0 && im_h < H && im_w < W) ? im_ptr[kh * dilation_h * W + kw * dilation_w] : 0; col_ptr += (col_h * col_w); } } } } template<typename T> __global__ void _Im2Col2d_NHWC(const int count, const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* im, T* col) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % col_w; const int h = idx / C / col_w; const int im_h_off = h * stride_h - pad_h; const int im_w_off = w * stride_w - pad_w; const int base_col_idx = (h * col_w) + w; for (int kh = 0; kh < kernel_h; kh++) { for (int kw = 0; kw < kernel_w; kw++) { const int im_h = kh * dilation_h + im_h_off; const int im_w = kw * dilation_w + im_w_off; const int col_idx = (((base_col_idx * kernel_h + kh) * kernel_w + kw) * C + c); col[col_idx] = (im_h >= 0 && im_w >= 0 && im_h < H && im_w < W) ? 
im[(im_h * W + im_w) * C + c] : 0; } } } } template <> void Im2Col2d<float, CUDAContext>(const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const string& data_format, const float* im, float* col) { if (data_format == "NCHW") { const int count = (C * col_h * col_w); _Im2Col2d_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, im, col); } else if (data_format == "NHWC") { const int count = (col_h * col_w * C); _Im2Col2d_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, C, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, im, col); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _Col2Im2d_NCHW(const int count, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* col, T* im) { CUDA_KERNEL_LOOP(idx, count) { T val = 0; const int im_w = idx % W + pad_w; const int im_h = (idx / W) % H + pad_h; const int im_c = idx / W / H; const int ex_kernel_h = (kernel_h - 1) * dilation_h + 1; const int ex_kernel_w = (kernel_w - 1) * dilation_w + 1; // redundant pixels will be ignored when conv // note to clip them by min(x,col_w) const int w_start = (im_w < ex_kernel_w) ? 0 : (im_w - ex_kernel_w) / stride_w + 1; const int w_end = min(im_w / stride_w + 1, col_w); const int h_start = (im_h < ex_kernel_h) ? 0 : (im_h - ex_kernel_h) / stride_h + 1; const int h_end = min(im_h / stride_h + 1, col_h); for (int h = h_start; h < h_end; ++h) { for (int w = w_start; w < w_end; ++w) { int kh_off = (im_h - h * stride_h); int kw_off = (im_w - w * stride_w); // only the serval im pixels used in dilated-conv // ignore the corresponding col pixels if (kh_off % dilation_h == 0 && kw_off % dilation_w == 0) { kh_off /= dilation_h; kw_off /= dilation_w; const int col_idx = (((im_c * kernel_h + kh_off) * kernel_w + kw_off) * col_h + h) * col_w + w; val += col[col_idx]; } } } im[idx] = val; } } template<typename T> __global__ void _Col2Im2d_NHWC(const int count, const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* col, T* im) { CUDA_KERNEL_LOOP(idx, count) { T val = 0; const int im_c = idx % C; const int im_w = (idx / C) % W + pad_w; const int im_h = (idx / C / W) + pad_h; const int ex_kernel_h = (kernel_h - 1) * dilation_h + 1; const int ex_kernel_w = (kernel_w - 1) * dilation_w + 1; // redundant pixels will be ignored when conv // note to clip them by min(x,col_w) const int w_start = (im_w < ex_kernel_w) ? 0 : (im_w - ex_kernel_w) / stride_w + 1; const int w_end = min(im_w / stride_w + 1, col_w); const int h_start = (im_h < ex_kernel_h) ? 
0 : (im_h - ex_kernel_h) / stride_h + 1; const int h_end = min(im_h / stride_h + 1, col_h); for (int h = h_start; h < h_end; ++h) { for (int w = w_start; w < w_end; ++w) { int kh_off = (im_h - h * stride_h); int kw_off = (im_w - w * stride_w); // only the serval im pixels used in dilated-conv // ignore the corresponding col pixels if (kh_off % dilation_h == 0 && kw_off % dilation_w == 0) { kh_off /= dilation_h; kw_off /= dilation_w; const int col_idx = (((h * col_w + w) * kernel_h + kh_off) * kernel_w + kw_off) * C + im_c; val += col[col_idx]; } } } im[idx] = val; } } template <> void Col2Im2d<float, CUDAContext>(const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const string& data_format, const float* col, float* im) { if (data_format == "NCHW") { const int count = (C * H * W); _Col2Im2d_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, col, im); } else if (data_format == "NHWC") { const int count = (H * W * C); _Col2Im2d_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, C, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, col, im); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } /******************** vision.nn_resize ********************/ template <typename T> __global__ void _NNResize_NCHW(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); y[idx] = x[((n * C + c) * H + h_in) * W + w_in]; } } template <typename T> __global__ void _NNResize_NHWC(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); y[idx] = x[((n * H + h_in) * W + w_in) * C + c]; } } template <> void NNResize<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* x, float* y) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; if (data_format == "NCHW") { _NNResize_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else if(data_format == "NHWC") { _NNResize_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _NNResizeGrad_NCHW(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* 
dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); atomicAdd(&dx[((n * C + c) * H + h_in) * W + w_in], dy[idx]); } } template <typename T> __global__ void _NNResizeGrad_NHWC(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); atomicAdd(&dx[((n * H + h_in) * W + w_in) * C + c], dy[idx]); } } template <> void NNResizeGrad<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* dy, float* dx) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; math::Set<float, CUDAContext>(N * C * H * W, 0, dx); if (data_format == "NCHW") { _NNResizeGrad_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else if(data_format == "NHWC") { _NNResizeGrad_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } /******************** vision.pooling ********************/ template<typename T> __global__ void _MAXPooling2d_NCHW(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int pw = idx % pool_w; const int ph = (idx / pool_w) % pool_h; const int pc = (idx / pool_w / pool_h) % C; const int pn = idx / pool_w / pool_h / C; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; const int end_h = min(start_h + kernel_h, H); const int end_w = min(start_w + kernel_w, W); start_h = max(start_h, 0); start_w = max(start_w, 0); T max_val = -FLT_MAX; int max_idx = -1; const T* x_ptr = x + (pn * C + pc) * H * W; for (int h = start_h; h < end_h; ++h) { for (int w = start_w; w < end_w; ++w) { if (x_ptr[h * W + w] > max_val) { max_idx = h * W + w; max_val = x_ptr[max_idx]; } } } y[idx] = max_val; mask[idx] = max_idx; } } template<typename T> __global__ void _MAXPooling2d_NHWC(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int pc = idx % C; const int pw = (idx / C) % pool_w; const int ph = (idx / C / pool_w) % pool_h; const int pn = idx / C / pool_w / pool_h; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; const int end_h = min(start_h + kernel_h, H); const int end_w = min(start_w + kernel_w, W); start_h = max(start_h, 0); start_w = max(start_w, 0); T max_val = -FLT_MAX; int max_idx = -1; for (int h = start_h; h < end_h; ++h) { for (int w = 
start_w; w < end_w; ++w) { const int x_idx = ((pn * H + h) * W + w) * C + pc; if (x[x_idx] > max_val) { max_idx = x_idx; max_val = x[max_idx]; } } } y[idx] = max_val; mask[idx] = max_idx; } } template<> void MAXPooling2d<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* x, int* mask, float* y) { if (data_format == "NCHW") { _MAXPooling2d_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, mask, y); } else if (data_format == "NHWC") { _MAXPooling2d_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, mask, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _AVGPooling2d_NCHW(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int pw = idx % pool_w; const int ph = (idx / pool_w) % pool_h; const int pc = (idx / pool_w / pool_h) % C; const int pn = idx / pool_w / pool_h / C; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); start_h = max(start_h, 0); start_w = max(start_w, 0); end_h = min(end_h, H); end_w = min(end_w, W); const T* x_ptr = x + (pn * C + pc) * H * W; const int pool_area = (end_h - start_h) * (end_w - start_w); T avg_val = 0; for (int h = start_h; h < end_h; ++h) { for (int w = start_w; w < end_w; ++w) { avg_val += x_ptr[h * W + w]; } } y[idx] = avg_val / pool_area; } } template<typename T> __global__ void _AVGPooling2d_NHWC(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int pc = idx % C; const int pw = (idx / C) % pool_w; const int ph = (idx / C / pool_w) % pool_h; const int pn = idx / C / pool_w / pool_h; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); start_h = max(start_h, 0); start_w = max(start_w, 0); end_h = min(end_h, H); end_w = min(end_w, W); const int pool_area = (end_h - start_h) * (end_w - start_w); T avg_val = 0; for (int h = start_h; h < end_h; ++h) for (int w = start_w; w < end_w; ++w) avg_val += x[((pn * H + h) * W + w) * C + pc]; y[idx] = avg_val / pool_area; } } template<> void AVGPooling2d<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* x, float* y) { if (data_format == "NCHW") { _AVGPooling2d_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, y); } else if 
(data_format == "NHWC") { _AVGPooling2d_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _MAXPooling2dGrad_NCHW(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; // allow overlapping const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; // allow clip const int end_ph = min((h + pad_h) / stride_h + 1, pool_h); const int end_pw = min((w + pad_w) / stride_w + 1, pool_w); T grad = 0; const int offset = (n * C + c) * pool_h * pool_w; const T* dy_ptr = dy + offset; const int* mask_ptr = mask + offset; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { if (mask_ptr[ph * pool_w + pw] == (h * W + w)) { grad += dy_ptr[ph * pool_w + pw]; } } } dx[idx] = grad; } } template<typename T> __global__ void _MAXPooling2dGrad_NHWC(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % W; const int h = (idx / C / W) % H; const int n = idx / C / W / H; // allow overlapping const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; // allow clip const int end_ph = min((h + pad_h) / stride_h + 1, pool_h); const int end_pw = min((w + pad_w) / stride_w + 1, pool_w); T grad = 0; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { const int x_idx = ((n * H + h) * W + w) * C + c; const int y_idx = ((n * pool_h + ph) * pool_w + pw) * C + c; if (mask[y_idx] == x_idx) grad += dy[y_idx]; } } dx[idx] = grad; } } template<> void MAXPooling2dGrad<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* dy, const int* mask, float* dx) { if (data_format == "NCHW") { _MAXPooling2dGrad_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, mask, dx); } else if (data_format == "NHWC") { _MAXPooling2dGrad_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, mask, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _AVGPooling2dGrad_NCHW(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int end_ph = min(h / stride_h + 1, pool_h); const int end_pw = min(w / stride_w + 1, pool_w); T grad = 0; const T* dy_ptr = dy + (n * C + c) * pool_h * pool_w; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); int pool_area = (end_h - start_h) * (end_w - start_w); grad += (dy_ptr[ph * pool_w + pw] / pool_area); } } dx[idx] = grad; } } template<typename T> __global__ void _AVGPooling2dGrad_NHWC(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % W; const int h = (idx / C / W) % H; const int n = idx / C / W / H; const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int end_ph = min(h / stride_h + 1, pool_h); const int end_pw = min(w / stride_w + 1, pool_w); T grad = 0; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); int pool_area = (end_h - start_h) * (end_w - start_w); const int y_idx = ((n * pool_h + ph) * pool_w + pw) * C + c; grad += (dy[y_idx] / pool_area); } } dx[idx] = grad; } } template<> void AVGPooling2dGrad<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* dy, float* dx) { if (data_format == "NCHW") { _AVGPooling2dGrad_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, dx); } else if (data_format == "NHWC") { _AVGPooling2dGrad_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } /******************** vision.roi_pooling ********************/ template <typename T> __global__ void _ROIPooling(const int count, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const T* x, const T* roi, int* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { int pw = idx % pool_w; int ph = (idx / pool_w) % pool_h; int c = (idx / pool_w / pool_h) % channels; int n = idx / pool_w / pool_h / channels; roi += n * 5; int im_idx = roi[0]; int x1 = round(roi[1] * spatial_scale); int y1 = round(roi[2] * spatial_scale); int x2 = round(roi[3] * spatial_scale); int y2 = round(roi[4] * spatial_scale); int roi_height = max(y2 - y1 + 1, 1); int roi_width = max(x2 - x1 + 1, 1); const float bin_size_h = (float)roi_height / (float)pool_h; const float bin_size_w = (float)roi_width / (float)pool_w; int start_h = floor(bin_size_h * ph); int start_w = floor(bin_size_w * pw); int end_h = ceil(bin_size_h * (ph + 1)); int end_w = ceil(bin_size_w * (pw + 1)); start_h = min(max(start_h + y1, 0), height); start_w = min(max(start_w + x1, 0), width); end_h = min(max(end_h + y1, 0), height); end_w = min(max(end_w + x1, 0), width); bool is_empty = (end_h <= start_h) || (end_w <= start_w); float max_val = is_empty ? 
0 : -FLT_MAX; int max_idx = -1; x += ((im_idx * channels + c) * height * width); for (int h = start_h; h < end_h; ++h) { for (int w = start_w; w < end_w; ++w) { const int x_idx = h * width + w; if (x[x_idx] > max_val) { max_val = x[x_idx]; max_idx = x_idx; } } //end w } // end h y[idx] = max_val; mask[idx] = max_idx; } } template<> void ROIPooling<float, CUDAContext>(const float spatial_scale, const int pool_h, const int pool_w, Tensor* x, Tensor* roi, Tensor* mask, Tensor* y) { auto* Xdata = x->data<float, CUDAContext>(); auto* Rdata = roi->data<float, CUDAContext>(); auto* Ydata = y->mutable_data<float, CUDAContext>(); auto* Mdata = mask->mutable_data<int, CUDAContext>(); TIndex channels = x->dim(1), count = y->count(); TIndex height = x->dim(2), width = x->dim(3); _ROIPooling<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, spatial_scale, channels, height, width, pool_h, pool_w, Xdata, Rdata, Mdata, Ydata); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _ROIPoolingGrad(const int count, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const T* dy, const T* roi, const int* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { int w = idx % width; int h = (idx / width) % height; int c = (idx / width / height) % channels; int im_idx = idx / width / height / channels; T diff = 0; for (int n = 0; n < num_rois; ++n) { const T* cur_roi = roi + n * 5; const int im_idx_spec = cur_roi[0]; // ignore wrong im_batch_idx if (im_idx != im_idx_spec) continue; int x1 = round(cur_roi[1] * spatial_scale); int y1 = round(cur_roi[2] * spatial_scale); int x2 = round(cur_roi[3] * spatial_scale); int y2 = round(cur_roi[4] * spatial_scale); const bool is_in = (w >= x1 && w <= x2 && h >= y1 && h <= y2); if (!is_in) continue; int roi_height = max(y2 - y1 + 1, 1); int roi_width = max(x2 - x1 + 1, 1); const float bin_size_h = (float)roi_height / (float)pool_h; const float bin_size_w = (float)roi_width / (float)pool_w; int start_ph = floor((h - y1) / bin_size_h); int start_pw = floor((w - x1) / bin_size_w); int end_ph = ceil((h + 1 - y1) / bin_size_h); int end_pw = ceil((w + 1 - x1) / bin_size_w); start_ph = min(max(start_ph, 0), pool_h); start_pw = min(max(start_pw, 0), pool_w); end_ph = min(max(end_ph, 0), pool_h); end_pw = min(max(end_pw, 0), pool_w); int y_offset = (n * channels + c) * pool_h * pool_w; const T* dy_off = dy + y_offset; const int* mask_off = mask + y_offset; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { int pool_idx = ph * pool_w + pw; if (mask_off[pool_idx] == (h * width + w)) { diff += dy_off[pool_idx]; } } // end pw } // end ph } // end n dx[idx] = diff; } } template<> void ROIPoolingGrad<float, CUDAContext>(const float spatial_scale, const int pool_h, const int pool_w, Tensor* dy, Tensor* roi, Tensor* mask, Tensor* dx) { auto* dYdata = dy->data<float, CUDAContext>(); auto* Rdata = roi->data<float, CUDAContext>(); auto* Mdata = mask->data<int, CUDAContext>(); auto* dXdata = dx->mutable_data<float, CUDAContext>(); TIndex channels = dx->dim(1), count = dx->count(); TIndex height = dx->dim(2), width = dx->dim(3); _ROIPoolingGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, roi->dim(0), spatial_scale, channels, height, width, pool_h, pool_w, dYdata, Rdata, Mdata, dXdata); CUDA_POST_KERNEL_CHECK; } /******************** vision.roi_align ********************/ template <typename T> __global__ void _ROIAlign(const int count, const float 
spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const T* x, const T* roi, T* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { int pw = idx % pool_w; int ph = (idx / pool_w) % pool_h; int c = (idx / pool_w / pool_h) % channels; int n = idx / pool_w / pool_h / channels; roi += n * 5; int roi_batch_ind = roi[0]; T roi_start_w = (roi[1]) * spatial_scale; T roi_start_h = (roi[2]) * spatial_scale; T roi_end_w = (roi[3]) * spatial_scale; T roi_end_h = (roi[4]) * spatial_scale; T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(1)); T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(1)); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pool_h); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pool_w); T hstart = static_cast<T>((ph)* bin_size_h); T wstart = static_cast<T>((pw)* bin_size_w); T hend = static_cast<T>((ph + 1) * bin_size_h); T wend = static_cast<T>((pw + 1) * bin_size_w); hstart = min(max(hstart + roi_start_h, static_cast<T>(0)), static_cast<T>(height)); hend = min(max(hend + roi_start_h, static_cast<T>(0)), static_cast<T>(height)); wstart = min(max(wstart + roi_start_w, static_cast<T>(0)), static_cast<T>(width)); wend = min(max(wend + roi_start_w, static_cast<T>(0)), static_cast<T>(width)); bool is_empty = (hend <= hstart) || (wend <= wstart); T maxval = is_empty ? 0 : -FLT_MAX; int maxidx = -1; int x_idx = 0; x += (roi_batch_ind * channels + c) * height * width; T h_stride = (hend - hstart) / 3.0; T w_stride = (wend - wstart) / 3.0; for (T h = hstart + h_stride; h <= hend - h_stride + 0.01; h += max(h_stride, 0.01)) { for (T w = wstart + w_stride; w <= wend - w_stride + 0.01; w += max(w_stride, 0.01)) { x_idx++; int hlow = min(max(static_cast<int>(floor(h)), 0), height - 1); int hhigh = min(hlow + 1, height - 1); int wleft = min(max(static_cast<int>(floor(w)), 0), width - 1); int wright = min(wleft + 1, width - 1); int topleft = hlow * width + wleft; int topright = hlow * width + wright; int bottomleft = hhigh * width + wleft; int bottomright = hhigh * width + wright; T alpha = (hlow == hhigh) ? static_cast<T>(0.5) : (h - hlow) / (hhigh - hlow); T beta = (wleft == wright) ? 
static_cast<T>(0.5) : (w - wleft) / (wright - wleft); T value = (1 - alpha) * (1 - beta) * x[topleft] + alpha * (1 - beta) * x[bottomleft] + (1 - alpha) * beta * x[topright] + alpha * beta * x[bottomright]; if (value > maxval) { maxval = value; maxidx = x_idx; } } } y[idx] = maxval; mask[idx] = maxidx; } } template<> void ROIAlign<float, CUDAContext>(const float spatial_scale, const int pool_h, const int pool_w, Tensor* x, Tensor* roi, Tensor* mask, Tensor* y) { auto* Xdata = x->data<float, CUDAContext>(); auto* Rdata = roi->data<float, CUDAContext>(); auto* Ydata = y->mutable_data<float, CUDAContext>(); auto* Mdata = mask->mutable_data<float, CUDAContext>(); TIndex channels = x->dim(1), count = y->count(); TIndex height = x->dim(2), width = x->dim(3); _ROIAlign<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, spatial_scale, channels, height, width, pool_h, pool_w, Xdata, Rdata, Mdata, Ydata); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _ROIAlignGrad(const int count, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const T* dy, const T* roi, const T* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { int w = idx % width; int h = (idx / width) % height; int c = (idx / width / height) % channels; int n = idx / width / height / channels; T gradient = 0; for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const T* offset_roi = roi + roi_n * 5; int roi_batch_ind = offset_roi[0]; if (n != roi_batch_ind) continue; T roi_start_w = (offset_roi[1]) * spatial_scale; T roi_start_h = (offset_roi[2]) * spatial_scale; T roi_end_w = (offset_roi[3]) * spatial_scale; T roi_end_h = (offset_roi[4]) * spatial_scale; const bool in_roi = (w > roi_start_w - 1.0 && w < roi_end_w + 1.0 && h > roi_start_h - 1.0 && h < roi_end_h + 1.0); if (!in_roi) continue; int offset = (roi_n * channels + c) * pool_h * pool_w; const T* offset_dy = dy + offset; const T* offset_mask = mask + offset; T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(1)); T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(1)); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pool_h); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pool_w); for (int ph = 0; ph < pool_h; ++ph) { for (int pw = 0; pw < pool_w; ++pw) { T hstart = static_cast<T>((ph)* bin_size_h); T wstart = static_cast<T>((pw)* bin_size_w); T hend = static_cast<T>((ph + 1) * bin_size_h); T wend = static_cast<T>((pw + 1) * bin_size_w); hstart = min(max(hstart + roi_start_h, static_cast<T>(0)), static_cast<T>(height)); hend = min(max(hend + roi_start_h, static_cast<T>(0)), static_cast<T>(height)); wstart = min(max(wstart + roi_start_w, static_cast<T>(0)), static_cast<T>(width)); wend = min(max(wend + roi_start_w, static_cast<T>(0)), static_cast<T>(width)); bool in_bin = (w > wstart - 1.0 && w < wend + 1.0 && h > hstart - 1.0 && h < hend + 1.0); if (!in_bin) continue; const int pool_idx = ph * pool_w + pw; int x_idx = 0; T h_stride = (hend - hstart) / 3.0; T w_stride = (wend - wstart) / 3.0; for (T rh = hstart + h_stride; rh <= hend - h_stride + 0.01; rh += max(h_stride, 0.01)) { for (T rw = wstart + w_stride; rw <= wend - w_stride + 0.01; rw += max(w_stride, 0.01)) { x_idx++; if (offset_mask[pool_idx] != x_idx) continue; int hlow = min(max(static_cast<int>(floor(rh)), 0), height - 1); int hhigh = min(hlow + 1, height - 1); int wleft = min(max(static_cast<int>(floor(rw)), 0), width - 1); int wright = min(wleft + 1, width - 1); if (h != hlow && h != 
hhigh && w != wleft && w != wright) continue; T alpha = (hlow == hhigh) ? static_cast<T>(0.5) : (rh - hlow) / (hhigh - hlow); T beta = (wleft == wright) ? static_cast<T>(0.5) : (rw - wleft) / (wright - wleft); if (h == hlow && w == wleft) gradient += offset_dy[pool_idx] * (1 - alpha) * (1 - beta); else if (h == hlow && w == wright) gradient += offset_dy[pool_idx] * (1 - alpha) * beta; else if (h == hhigh && w == wleft) gradient += offset_dy[pool_idx] * alpha * (1 - beta); else if (h == hhigh && w == wright) gradient += offset_dy[pool_idx] * alpha * beta; } } } } } dx[idx] = gradient; } } template<> void ROIAlignGrad<float, CUDAContext>(const float spatial_scale, const int pool_h, const int pool_w, Tensor* dy, Tensor* roi, Tensor* mask, Tensor* dx) { auto* dYdata = dy->data<float, CUDAContext>(); auto* Rdata = roi->data<float, CUDAContext>(); auto* Mdata = mask->data<float, CUDAContext>(); auto* dXdata = dx->mutable_data<float, CUDAContext>(); TIndex channels = dx->dim(1), count = dx->count(); TIndex height = dx->dim(2), width = dx->dim(3); _ROIAlignGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, roi->dim(0), spatial_scale, channels, height, width, pool_h, pool_w, dYdata, Rdata, Mdata, dXdata); CUDA_POST_KERNEL_CHECK; } } // namespace kernel } // namespace dragon #endif // WITH_CUDA
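The _ROIAlign kernel above pools each bin by sampling a small grid of points and bilinearly interpolating the four surrounding pixels with alpha/beta weights. The following is a minimal, self-contained sketch of that weighting only; it is not part of the Dragon source, and the helper names (clampi, bilinear_sample) and demo values are hypothetical.

#include <math.h>
#include <cstdio>

__host__ __device__ inline int clampi(int v, int lo, int hi) { return v < lo ? lo : (v > hi ? hi : v); }

// Reproduces the interpolation step used inside _ROIAlign for a single sample point (h, w).
__host__ __device__ inline float bilinear_sample(const float* x, int height, int width, float h, float w) {
    int hlow   = clampi((int)floorf(h), 0, height - 1);
    int hhigh  = clampi(hlow + 1, 0, height - 1);
    int wleft  = clampi((int)floorf(w), 0, width - 1);
    int wright = clampi(wleft + 1, 0, width - 1);
    // The 0.5 fallback matches _ROIAlign when the two neighbouring rows/columns coincide.
    float alpha = (hlow == hhigh)   ? 0.5f : (h - hlow)  / (float)(hhigh - hlow);
    float beta  = (wleft == wright) ? 0.5f : (w - wleft) / (float)(wright - wleft);
    return (1 - alpha) * (1 - beta) * x[hlow  * width + wleft]
         + alpha       * (1 - beta) * x[hhigh * width + wleft]
         + (1 - alpha) * beta       * x[hlow  * width + wright]
         + alpha       * beta       * x[hhigh * width + wright];
}

int main() {
    // 2x2 feature map [[1, 2], [3, 4]]; sampling the centre gives the mean of the four pixels.
    float x[4] = {1.f, 2.f, 3.f, 4.f};
    printf("%f\n", bilinear_sample(x, 2, 2, 0.5f, 0.5f));   // expected: 2.500000
    return 0;
}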
28222618e138b6b7e701b96554970bc536db0e8a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/apply_gradient_descent_impl.cuh" #include "include/hip/hip_fp16.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" template <typename T> __global__ void ApplyGradientDescent(const size_t size, T *var, const T *alpha, const T *delta, T *output) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { const T alpha_value = alpha[0]; var[pos] -= alpha_value * delta[pos]; output[pos] = var[pos]; } } template <typename T> hipError_t CalApplyGradientDescent(const size_t &size, T *var, const T *alpha, const T *delta, T *output, hipStream_t cuda_stream) { hipLaunchKernelGGL(( ApplyGradientDescent), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, var, alpha, delta, output); CHECK_CUDA_LAUNCH_SUCCESS(); } template CUDA_LIB_EXPORT hipError_t CalApplyGradientDescent<float>(const size_t &size, float *var, const float *alpha, const float *delta, float *output, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalApplyGradientDescent<half>(const size_t &size, half *var, const half *alpha, const half *delta, half *output, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalApplyGradientDescent<int8_t>(const size_t &size, int8_t *var, const int8_t *alpha, const int8_t *delta, int8_t *output, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalApplyGradientDescent<uint8_t>(const size_t &size, uint8_t *var, const uint8_t *alpha, const uint8_t *delta, uint8_t *output, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalApplyGradientDescent<int16_t>(const size_t &size, int16_t *var, const int16_t *alpha, const int16_t *delta, int16_t *output, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalApplyGradientDescent<uint16_t>(const size_t &size, uint16_t *var, const uint16_t *alpha, const uint16_t *delta, uint16_t *output, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalApplyGradientDescent<uint32_t>(const size_t &size, uint32_t *var, const uint32_t *alpha, const uint32_t *delta, uint32_t *output, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalApplyGradientDescent<int64_t>(const size_t &size, int64_t *var, const int64_t *alpha, const int64_t *delta, int64_t *output, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalApplyGradientDescent<uint64_t>(const size_t &size, uint64_t *var, const uint64_t *alpha, const uint64_t *delta, uint64_t *output, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalApplyGradientDescent<double>(const size_t &size, double *var, const double *alpha, const double *delta, double *output, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalApplyGradientDescent<Complex<float>>(const size_t &size, Complex<float> *var, const Complex<float> *alpha, const 
Complex<float> *delta, Complex<float> *output, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalApplyGradientDescent<Complex<double>>(const size_t &size, Complex<double> *var, const Complex<double> *alpha, const Complex<double> *delta, Complex<double> *output, hipStream_t cuda_stream);
28222618e138b6b7e701b96554970bc536db0e8a.cu
/** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/apply_gradient_descent_impl.cuh" #include "include/cuda_fp16.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" template <typename T> __global__ void ApplyGradientDescent(const size_t size, T *var, const T *alpha, const T *delta, T *output) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { const T alpha_value = alpha[0]; var[pos] -= alpha_value * delta[pos]; output[pos] = var[pos]; } } template <typename T> cudaError_t CalApplyGradientDescent(const size_t &size, T *var, const T *alpha, const T *delta, T *output, cudaStream_t cuda_stream) { ApplyGradientDescent<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, var, alpha, delta, output); CHECK_CUDA_LAUNCH_SUCCESS(); } template CUDA_LIB_EXPORT cudaError_t CalApplyGradientDescent<float>(const size_t &size, float *var, const float *alpha, const float *delta, float *output, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalApplyGradientDescent<half>(const size_t &size, half *var, const half *alpha, const half *delta, half *output, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalApplyGradientDescent<int8_t>(const size_t &size, int8_t *var, const int8_t *alpha, const int8_t *delta, int8_t *output, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalApplyGradientDescent<uint8_t>(const size_t &size, uint8_t *var, const uint8_t *alpha, const uint8_t *delta, uint8_t *output, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalApplyGradientDescent<int16_t>(const size_t &size, int16_t *var, const int16_t *alpha, const int16_t *delta, int16_t *output, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalApplyGradientDescent<uint16_t>(const size_t &size, uint16_t *var, const uint16_t *alpha, const uint16_t *delta, uint16_t *output, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalApplyGradientDescent<uint32_t>(const size_t &size, uint32_t *var, const uint32_t *alpha, const uint32_t *delta, uint32_t *output, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalApplyGradientDescent<int64_t>(const size_t &size, int64_t *var, const int64_t *alpha, const int64_t *delta, int64_t *output, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalApplyGradientDescent<uint64_t>(const size_t &size, uint64_t *var, const uint64_t *alpha, const uint64_t *delta, uint64_t *output, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalApplyGradientDescent<double>(const size_t &size, double *var, const double *alpha, const double *delta, double *output, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalApplyGradientDescent<Complex<float>>(const size_t &size, Complex<float> *var, const Complex<float> *alpha, const Complex<float> *delta, Complex<float> *output, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT 
cudaError_t CalApplyGradientDescent<Complex<double>>(const size_t &size, Complex<double> *var, const Complex<double> *alpha, const Complex<double> *delta, Complex<double> *output, cudaStream_t cuda_stream);
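CalApplyGradientDescent above performs the in-place update var -= alpha * delta over a grid-stride loop. The snippet below is a minimal usage sketch built around a standalone copy of that kernel body; it deliberately avoids the MindSpore wrapper and its build macros (GET_BLOCKS, GET_THREADS, CHECK_CUDA_LAUNCH_SUCCESS), so the kernel name apply_gd and the launch configuration are illustrative assumptions, not the library API.

#include <cstdio>
#include <cuda_runtime.h>

// Standalone copy of the update rule: var <- var - alpha * delta, mirrored into out.
__global__ void apply_gd(size_t size, float* var, const float* alpha, const float* delta, float* out) {
  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
    var[pos] -= alpha[0] * delta[pos];
    out[pos] = var[pos];
  }
}

int main() {
  const size_t n = 4;
  float h_var[n] = {1.f, 2.f, 3.f, 4.f}, h_delta[n] = {1.f, 1.f, 1.f, 1.f}, h_alpha = 0.1f, h_out[n];
  float *d_var, *d_delta, *d_alpha, *d_out;
  cudaMalloc(&d_var, n * sizeof(float));
  cudaMalloc(&d_delta, n * sizeof(float));
  cudaMalloc(&d_alpha, sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemcpy(d_var, h_var, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_delta, h_delta, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_alpha, &h_alpha, sizeof(float), cudaMemcpyHostToDevice);
  apply_gd<<<1, 128>>>(n, d_var, d_alpha, d_delta, d_out);
  cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (size_t i = 0; i < n; ++i) printf("%f ", h_out[i]);  // expected: 0.9 1.9 2.9 3.9
  printf("\n");
  cudaFree(d_var); cudaFree(d_delta); cudaFree(d_alpha); cudaFree(d_out);
  return 0;
}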
7d35bd50f25784c03b9e80a4f18d59cc32fc9a0a.hip
// !!! This is a file automatically generated by hipify!!! #include "THHBlas.h" #include "THHGeneral.h" #include "THHHalf.h" float THCudaBlas_Sdot(THCState *state, int64_t n, float *x, int64_t incx, float *y, int64_t incy) { if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { int i_n = (int)n; int i_incx = (int)incx; int i_incy = (int)incy; float result; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSdot(handle, i_n, x, i_incx, y, i_incy, &result)); return result; } THError("Cublas_Sdot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0; } double THCudaBlas_Ddot(THCState *state, int64_t n, double *x, int64_t incx, double *y, int64_t incy) { if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { int i_n = (int)n; int i_incx = (int)incx; int i_incy = (int)incy; double result; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDdot(handle, i_n, x, i_incx, y, i_incy, &result)); return result; } THError("Cublas_Ddot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0; } #ifdef CUDA_HALF_TENSOR float THCudaBlas_Hdot(THCState *state, int64_t n, half *x, int64_t incx, half *y, int64_t incy) { #if TORCH_HIP_VERSION >= 8000 if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { int i_n = (int)n; int i_incx = (int)incx; int i_incy = (int)incy; float result; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDotEx_v2(handle, i_n, x, HIP_R_16F, i_incx, y, HIP_R_16F, i_incy, &result, HIP_R_32F, HIP_R_32F)); return result; } THError("Cublas_Hdot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0; #else THError("Cublas_Hdot requires CUDA 8.0+"); return 0; #endif } #endif /* Level 2 */ void THCudaBlas_Sgemv(THCState *state, char trans, int64_t m, int64_t n, float alpha, float *a, int64_t lda, float *x, int64_t incx, float beta, float *y, int64_t incy) { if(n == 1) lda = m; hipblasOperation_t op; if (trans == 't') op = HIPBLAS_OP_T; else if (trans == 'n') op = HIPBLAS_OP_N; else if (trans == 'c') op = HIPBLAS_OP_C; if( (m <= INT_MAX) && (n <= INT_MAX) && (lda > 0) && (lda <= INT_MAX) && (incx > 0) && (incx <= INT_MAX) && (incy > 0) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSgemv(handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy)); return; } THError("Cublas_Sgemv only supports m, n, lda, incx, incy" "in the range 0 < [val] <= %d", INT_MAX); } void THCudaBlas_Dgemv(THCState *state, char trans, int64_t m, int64_t n, double alpha, double *a, int64_t lda, double *x, int64_t incx, double beta, double *y, int64_t incy) { if(n == 1) lda = m; hipblasOperation_t op; if (trans == 't') op = HIPBLAS_OP_T; else if (trans == 'n') op = HIPBLAS_OP_N; else if (trans == 'c') op = HIPBLAS_OP_C; if( (m <= INT_MAX) && (n <= INT_MAX) && (lda > 0) && (lda <= INT_MAX) && (incx > 0) && (incx <= INT_MAX) && (incy > 0) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n 
= (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDgemv(handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy)); return; } THError("Cublas_Dgemv only supports m, n, lda, incx, incy" "in the range 0 < [val] <= %d", INT_MAX); } void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda) { if(n == 1) lda = m; if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda)); return; } THError("Cublas_Sger only supports m, n, lda, incx, incy" "with the bound [val] <= %d", INT_MAX); } void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda) { if(n == 1) lda = m; if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda)); return; } THError("Cublas_Dger only supports m, n, lda, incx, incy" "with the bound [val] <= %d", INT_MAX); } hipblasOperation_t convertTransToCublasOperation(char trans) { if (trans == 't') return HIPBLAS_OP_T; else if (trans == 'n') return HIPBLAS_OP_N; else if (trans == 'c') return HIPBLAS_OP_C; else { THError("trans must be one of: t, n, c"); return HIPBLAS_OP_T; } } void adjustLd(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc) { int transa_ = ((transa == 't') || (transa == 'T')); int transb_ = ((transb == 't') || (transb == 'T')); if(n == 1) *ldc = m; if(transa_) { if(m == 1) *lda = k; } else { if(k == 1) *lda = m; } if(transb_) { if(k == 1) *ldb = n; } else { if(n == 1) *ldb = k; } } /* Level 3 */ void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc) { adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_k = (int)k; int i_lda = (int)lda; int i_ldb = (int)ldb; int i_ldc = (int)ldc; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSgemm(handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc)); return; } THError("Cublas_Sgemm only supports m, n, k, lda, ldb, ldc" "with the bound [val] <= %d", INT_MAX); } #ifdef CUDA_HALF_TENSOR // In CUDA 8.0, definition of data types for sgemmex changed #if TORCH_HIP_VERSION < 8000 # define HIP_R_16F 
HIPBLAS_DATA_HALF #endif void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, half alpha, half *a, int64_t lda, half *b, int64_t ldb, half beta, half *c, int64_t ldc) { adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_k = (int)k; int i_lda = (int)lda; int i_ldb = (int)ldb; int i_ldc = (int)ldc; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); // Simulated Hgemm float fAlpha = THC_half2float(alpha); float fBeta = THC_half2float(beta); #if TORCH_HIP_VERSION < 9000 THCublasCheck(cublasSgemmEx(handle, opa, opb, i_m, i_n, i_k, &fAlpha, a, HIP_R_16F, i_lda, b, HIP_R_16F, i_ldb, &fBeta, c, HIP_R_16F, i_ldc)); #else hipDeviceProp_t* prop = THCState_getCurrentDeviceProperties(state); if (prop->major >= 5){ THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); THCublasCheck(hipblasGemmEx(handle, opa, opb, i_m, i_n, i_k, &fAlpha, a, HIP_R_16F, i_lda, b, HIP_R_16F, i_ldb, &fBeta, c, HIP_R_16F, i_ldc, HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP)); THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); }else{ THCublasCheck(cublasSgemmEx(handle, opa, opb, i_m, i_n, i_k, &fAlpha, a, HIP_R_16F, i_lda, b, HIP_R_16F, i_ldb, &fBeta, c, HIP_R_16F, i_ldc)); } #endif return; } THError("Cublas_Hgemm only supports m, n, k, lda, ldb, ldc" "with th bound [val] <= %d", INT_MAX); } #endif void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc) { adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_k = (int)k; int i_lda = (int)lda; int i_ldb = (int)ldb; int i_ldc = (int)ldc; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDgemm(handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc)); return; } THError("Cublas_Dgemm only supports m, n, k, lda, ldb, ldc" "with the bound [val] <= %d", INT_MAX); } void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb, float beta, float *c[], int64_t ldc, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSgemmBatched(handle, opa, opb, (int)m, (int)n, (int)k, 
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc, (int)batchCount)); } #if TORCH_HIP_VERSION >= 8000 void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB, float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSgemmStridedBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC, (int)batchCount)); } #endif void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb, double beta, double *c[], int64_t ldc, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDgemmBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc, (int)batchCount)); } #if TORCH_HIP_VERSION >= 8000 void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB, double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDgemmStridedBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC, (int)batchCount)); } #endif /* Inverse */ void THCudaBlas_Sgetrf(THCState *state, int n, float **a, int lda, int *pivot, int *info, int batchSize) { if( (n >= INT_MAX) || (lda >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Sgetrf only supports n, lda, batchSize" "with the bound [val] <= %d", INT_MAX); } hipblasHandle_t handle = 
THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSgetrfBatched(handle, n, a, lda, pivot, info, batchSize)); } void THCudaBlas_Dgetrf(THCState *state, int n, double **a, int lda, int *pivot, int *info, int batchSize) { if( (n >= INT_MAX) || (lda >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Dgetrf only supports n, lda, batchSize" "with the bound [val] <= %d", INT_MAX); } hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDgetrfBatched(handle, n, a, lda, pivot, info, batchSize)); } THC_API void THCudaBlas_Sgetrs(THCState *state, char transa, int n, int nrhs, const float **a, int lda, int *pivot, float **b, int ldb, int *info, int batchSize) { if( (n >= INT_MAX) || (nrhs >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Dgetrs only supports n, nrhs, lda, ldb, batchSize" "with the bound [val] <= %d", INT_MAX); } // no need to adjust leading dimensions, since matrices are square hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSgetrsBatched(handle, opa, n, nrhs, a, lda, pivot, b, ldb, info, batchSize)); } THC_API void THCudaBlas_Dgetrs(THCState *state, char transa, int n, int nrhs, const double **a, int lda, int *pivot, double **b, int ldb, int *info, int batchSize) { if( (n >= INT_MAX) || (nrhs >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Dgetrs only supports n, nrhs, lda, ldb, batchSize" "with the bound [val] <= %d", INT_MAX); } // no need to adjust leading dimensions, since matrices are square hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDgetrsBatched(handle, opa, n, nrhs, a, lda, pivot, b, ldb, info, batchSize)); } void THCudaBlas_Sgetri(THCState *state, int n, const float **a, int lda, int *pivot, float **c, int ldc, int *info, int batchSize) { if( (n >= INT_MAX) || (lda >= INT_MAX)|| (ldc >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Sgetri only supports n, lda, ldc, batchSize" "with the bound [val] <= %d", INT_MAX); } hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSgetriBatched(handle, n, a, lda, pivot, c, ldc, info, batchSize)); } void THCudaBlas_Dgetri(THCState *state, int n, const double **a, int lda, int *pivot, double **c, int ldc, int *info, int batchSize) { if( (n >= INT_MAX) || (lda >= INT_MAX)|| (ldc >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Dgetri only supports n, lda, ldc, batchSize" "with the bound [val] <= %d", INT_MAX); } hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDgetriBatched(handle, n, a, lda, pivot, c, ldc, info, batchSize)); }
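The adjustLd helper above patches degenerate leading dimensions before the GEMM wrappers hand them to BLAS. A host-only sketch, replicating the function as written in this file with hypothetical demo values, shows its effect for a single-column output; the name adjustLd_demo and the numbers are mine, not part of the THC source.

#include <cstdint>
#include <cstdio>

// Copy of the leading-dimension fix-up shown above, for illustration only.
static void adjustLd_demo(char transa, char transb, int64_t m, int64_t n, int64_t k,
                          int64_t* lda, int64_t* ldb, int64_t* ldc) {
  int transa_ = (transa == 't') || (transa == 'T');
  int transb_ = (transb == 't') || (transb == 'T');
  if (n == 1) *ldc = m;                                      // single output column: ldc must be at least m
  if (transa_) { if (m == 1) *lda = k; } else { if (k == 1) *lda = m; }
  if (transb_) { if (k == 1) *ldb = n; } else { if (n == 1) *ldb = k; }
}

int main() {
  int64_t lda = 1, ldb = 1, ldc = 1;                         // deliberately degenerate strides
  adjustLd_demo('n', 'n', /*m=*/8, /*n=*/1, /*k=*/1, &lda, &ldb, &ldc);
  printf("lda=%lld ldb=%lld ldc=%lld\n", (long long)lda, (long long)ldb, (long long)ldc);
  // expected: lda=8 ldb=1 ldc=8 (column vectors laid out with stride m)
  return 0;
}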
7d35bd50f25784c03b9e80a4f18d59cc32fc9a0a.cu
#include "THCBlas.h" #include "THCGeneral.h" #include "THCHalf.h" float THCudaBlas_Sdot(THCState *state, int64_t n, float *x, int64_t incx, float *y, int64_t incy) { if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { int i_n = (int)n; int i_incx = (int)incx; int i_incy = (int)incy; float result; cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasSdot(handle, i_n, x, i_incx, y, i_incy, &result)); return result; } THError("Cublas_Sdot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0; } double THCudaBlas_Ddot(THCState *state, int64_t n, double *x, int64_t incx, double *y, int64_t incy) { if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { int i_n = (int)n; int i_incx = (int)incx; int i_incy = (int)incy; double result; cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasDdot(handle, i_n, x, i_incx, y, i_incy, &result)); return result; } THError("Cublas_Ddot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0; } #ifdef CUDA_HALF_TENSOR float THCudaBlas_Hdot(THCState *state, int64_t n, half *x, int64_t incx, half *y, int64_t incy) { #if CUDA_VERSION >= 8000 if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { int i_n = (int)n; int i_incx = (int)incx; int i_incy = (int)incy; float result; cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasDotEx(handle, i_n, x, CUDA_R_16F, i_incx, y, CUDA_R_16F, i_incy, &result, CUDA_R_32F, CUDA_R_32F)); return result; } THError("Cublas_Hdot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0; #else THError("Cublas_Hdot requires CUDA 8.0+"); return 0; #endif } #endif /* Level 2 */ void THCudaBlas_Sgemv(THCState *state, char trans, int64_t m, int64_t n, float alpha, float *a, int64_t lda, float *x, int64_t incx, float beta, float *y, int64_t incy) { if(n == 1) lda = m; cublasOperation_t op; if (trans == 't') op = CUBLAS_OP_T; else if (trans == 'n') op = CUBLAS_OP_N; else if (trans == 'c') op = CUBLAS_OP_C; if( (m <= INT_MAX) && (n <= INT_MAX) && (lda > 0) && (lda <= INT_MAX) && (incx > 0) && (incx <= INT_MAX) && (incy > 0) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasSgemv(handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy)); return; } THError("Cublas_Sgemv only supports m, n, lda, incx, incy" "in the range 0 < [val] <= %d", INT_MAX); } void THCudaBlas_Dgemv(THCState *state, char trans, int64_t m, int64_t n, double alpha, double *a, int64_t lda, double *x, int64_t incx, double beta, double *y, int64_t incy) { if(n == 1) lda = m; cublasOperation_t op; if (trans == 't') op = CUBLAS_OP_T; else if (trans == 'n') op = CUBLAS_OP_N; else if (trans == 'c') op = CUBLAS_OP_C; if( (m <= INT_MAX) && (n <= INT_MAX) && (lda > 0) && (lda <= INT_MAX) && (incx > 0) && (incx <= INT_MAX) && (incy > 0) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; 
cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasDgemv(handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy)); return; } THError("Cublas_Dgemv only supports m, n, lda, incx, incy" "in the range 0 < [val] <= %d", INT_MAX); } void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda) { if(n == 1) lda = m; if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda)); return; } THError("Cublas_Sger only supports m, n, lda, incx, incy" "with the bound [val] <= %d", INT_MAX); } void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda) { if(n == 1) lda = m; if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda)); return; } THError("Cublas_Dger only supports m, n, lda, incx, incy" "with the bound [val] <= %d", INT_MAX); } cublasOperation_t convertTransToCublasOperation(char trans) { if (trans == 't') return CUBLAS_OP_T; else if (trans == 'n') return CUBLAS_OP_N; else if (trans == 'c') return CUBLAS_OP_C; else { THError("trans must be one of: t, n, c"); return CUBLAS_OP_T; } } void adjustLd(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc) { int transa_ = ((transa == 't') || (transa == 'T')); int transb_ = ((transb == 't') || (transb == 'T')); if(n == 1) *ldc = m; if(transa_) { if(m == 1) *lda = k; } else { if(k == 1) *lda = m; } if(transb_) { if(k == 1) *ldb = n; } else { if(n == 1) *ldb = k; } } /* Level 3 */ void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc) { adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc); cublasOperation_t opa = convertTransToCublasOperation(transa); cublasOperation_t opb = convertTransToCublasOperation(transb); if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_k = (int)k; int i_lda = (int)lda; int i_ldb = (int)ldb; int i_ldc = (int)ldc; cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasSgemm(handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc)); return; } THError("Cublas_Sgemm only supports m, n, k, lda, ldb, ldc" "with the bound [val] <= %d", INT_MAX); } #ifdef CUDA_HALF_TENSOR // In CUDA 8.0, definition of data types for sgemmex changed #if CUDA_VERSION < 8000 # define CUDA_R_16F CUBLAS_DATA_HALF #endif void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, 
int64_t n, int64_t k, half alpha, half *a, int64_t lda, half *b, int64_t ldb, half beta, half *c, int64_t ldc) { adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc); cublasOperation_t opa = convertTransToCublasOperation(transa); cublasOperation_t opb = convertTransToCublasOperation(transb); if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_k = (int)k; int i_lda = (int)lda; int i_ldb = (int)ldb; int i_ldc = (int)ldc; cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); // Simulated Hgemm float fAlpha = THC_half2float(alpha); float fBeta = THC_half2float(beta); #if CUDA_VERSION < 9000 THCublasCheck(cublasSgemmEx(handle, opa, opb, i_m, i_n, i_k, &fAlpha, a, CUDA_R_16F, i_lda, b, CUDA_R_16F, i_ldb, &fBeta, c, CUDA_R_16F, i_ldc)); #else cudaDeviceProp* prop = THCState_getCurrentDeviceProperties(state); if (prop->major >= 5){ THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); THCublasCheck(cublasGemmEx(handle, opa, opb, i_m, i_n, i_k, &fAlpha, a, CUDA_R_16F, i_lda, b, CUDA_R_16F, i_ldb, &fBeta, c, CUDA_R_16F, i_ldc, CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP)); THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); }else{ THCublasCheck(cublasSgemmEx(handle, opa, opb, i_m, i_n, i_k, &fAlpha, a, CUDA_R_16F, i_lda, b, CUDA_R_16F, i_ldb, &fBeta, c, CUDA_R_16F, i_ldc)); } #endif return; } THError("Cublas_Hgemm only supports m, n, k, lda, ldb, ldc" "with th bound [val] <= %d", INT_MAX); } #endif void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc) { adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc); cublasOperation_t opa = convertTransToCublasOperation(transa); cublasOperation_t opb = convertTransToCublasOperation(transb); if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_k = (int)k; int i_lda = (int)lda; int i_ldb = (int)ldb; int i_ldc = (int)ldc; cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasDgemm(handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc)); return; } THError("Cublas_Dgemm only supports m, n, k, lda, ldb, ldc" "with the bound [val] <= %d", INT_MAX); } void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb, float beta, float *c[], int64_t ldc, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc); cublasOperation_t opa = convertTransToCublasOperation(transa); cublasOperation_t opb = convertTransToCublasOperation(transb); cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasSgemmBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc, (int)batchCount)); } #if CUDA_VERSION >= 8000 void 
THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB, float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc); cublasOperation_t opa = convertTransToCublasOperation(transa); cublasOperation_t opb = convertTransToCublasOperation(transb); cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasSgemmStridedBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC, (int)batchCount)); } #endif void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb, double beta, double *c[], int64_t ldc, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc); cublasOperation_t opa = convertTransToCublasOperation(transa); cublasOperation_t opb = convertTransToCublasOperation(transb); cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasDgemmBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc, (int)batchCount)); } #if CUDA_VERSION >= 8000 void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB, double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc); cublasOperation_t opa = convertTransToCublasOperation(transa); cublasOperation_t opb = convertTransToCublasOperation(transb); cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasDgemmStridedBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC, (int)batchCount)); } #endif /* Inverse */ void THCudaBlas_Sgetrf(THCState *state, int n, float **a, int lda, int *pivot, int *info, int batchSize) { if( (n >= INT_MAX) || (lda >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Sgetrf only supports n, lda, batchSize" "with the bound [val] <= %d", INT_MAX); } cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasSgetrfBatched(handle, n, a, lda, 
pivot, info, batchSize)); } void THCudaBlas_Dgetrf(THCState *state, int n, double **a, int lda, int *pivot, int *info, int batchSize) { if( (n >= INT_MAX) || (lda >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Dgetrf only supports n, lda, batchSize" "with the bound [val] <= %d", INT_MAX); } cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasDgetrfBatched(handle, n, a, lda, pivot, info, batchSize)); } THC_API void THCudaBlas_Sgetrs(THCState *state, char transa, int n, int nrhs, const float **a, int lda, int *pivot, float **b, int ldb, int *info, int batchSize) { if( (n >= INT_MAX) || (nrhs >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Dgetrs only supports n, nrhs, lda, ldb, batchSize" "with the bound [val] <= %d", INT_MAX); } // no need to adjust leading dimensions, since matrices are square cublasOperation_t opa = convertTransToCublasOperation(transa); cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasSgetrsBatched(handle, opa, n, nrhs, a, lda, pivot, b, ldb, info, batchSize)); } THC_API void THCudaBlas_Dgetrs(THCState *state, char transa, int n, int nrhs, const double **a, int lda, int *pivot, double **b, int ldb, int *info, int batchSize) { if( (n >= INT_MAX) || (nrhs >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Dgetrs only supports n, nrhs, lda, ldb, batchSize" "with the bound [val] <= %d", INT_MAX); } // no need to adjust leading dimensions, since matrices are square cublasOperation_t opa = convertTransToCublasOperation(transa); cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasDgetrsBatched(handle, opa, n, nrhs, a, lda, pivot, b, ldb, info, batchSize)); } void THCudaBlas_Sgetri(THCState *state, int n, const float **a, int lda, int *pivot, float **c, int ldc, int *info, int batchSize) { if( (n >= INT_MAX) || (lda >= INT_MAX)|| (ldc >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Sgetri only supports n, lda, ldc, batchSize" "with the bound [val] <= %d", INT_MAX); } cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasSgetriBatched(handle, n, a, lda, pivot, c, ldc, info, batchSize)); } void THCudaBlas_Dgetri(THCState *state, int n, const double **a, int lda, int *pivot, double **c, int ldc, int *info, int batchSize) { if( (n >= INT_MAX) || (lda >= INT_MAX)|| (ldc >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Dgetri only supports n, lda, ldc, batchSize" "with the bound [val] <= %d", INT_MAX); } cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasDgetriBatched(handle, n, a, lda, pivot, c, ldc, info, batchSize)); }
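THCudaBlas_Sgemm above ultimately reduces to a column-major cublasSgemm call on the state's handle and stream. Below is a standalone sketch of that call without THCState, assuming cuBLAS is linked separately (-lcublas); the matrix contents and variable names are illustrative only and are not part of the THC source.

#include <cstdio>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main() {
  const int m = 2, n = 2, k = 2;
  // Column-major 2x2 matrices: A = I, B = [[1, 3], [2, 4]] stored column by column.
  float hA[m * k] = {1, 0, 0, 1};
  float hB[k * n] = {1, 2, 3, 4};
  float hC[m * n] = {0, 0, 0, 0};
  float *dA, *dB, *dC;
  cudaMalloc(&dA, sizeof(hA)); cudaMalloc(&dB, sizeof(hB)); cudaMalloc(&dC, sizeof(hC));
  cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);
  cublasHandle_t handle;
  cublasCreate(&handle);
  const float alpha = 1.f, beta = 0.f;
  // C = alpha * op(A) * op(B) + beta * C; leading dimensions are the row counts (column-major).
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, dA, m, dB, k, &beta, dC, m);
  cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);
  printf("%g %g %g %g\n", hC[0], hC[1], hC[2], hC[3]);       // expected: 1 2 3 4 (C == B since A = I)
  cublasDestroy(handle);
  cudaFree(dA); cudaFree(dB); cudaFree(dC);
  return 0;
}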
a4024f78cd5a3d2ecd4b49fd8735c5c43c07fa40.hip
// !!! This is a file automatically generated by hipify!!! /* This is the file, written in C++ that is compiled into GPU machine code Contains the functions: __global__ double* RK4 __host__ __device__ double* Equation __host__ __device__ double* christoffel Struct main_cuda The main C++ library, Geodesic, calls this file to run the RK4 solving in parallel on the CUDA GPU Python, at runtime, compiles this file if the christoffel symbols are edited */ // CUDA-specific includes #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "hip/hip_runtime_api.h" #include "device_launch_parameters.h" #include "hip/device_functions.h" // General includes #include <stdio.h> #include <iostream> #include <math.h> #include <vector> // Includes main C++ header #include "module.h" // 64 different functions that store the christoffel symbols across a 4x4x4 matrix // These functions are written with comments so that python can find the specific ones __host__ __device__ float christoffel_000(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_000 */ 0 /* CHRISTOFFEL_000 */; } __host__ __device__ float christoffel_001(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_001 */ 0 /* CHRISTOFFEL_001 */; } __host__ __device__ float christoffel_002(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_002 */ 0 /* CHRISTOFFEL_002 */; } __host__ __device__ float christoffel_003(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_003 */ 0 /* CHRISTOFFEL_003 */; } __host__ __device__ float christoffel_010(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_010 */ 0 /* CHRISTOFFEL_010 */; } __host__ __device__ float christoffel_011(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_011 */ (1.0/2.0)/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 5.0/3.0) /* CHRISTOFFEL_011 */; } __host__ __device__ float christoffel_012(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_012 */ 0 /* CHRISTOFFEL_012 */; } __host__ __device__ float christoffel_013(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_013 */ 0 /* CHRISTOFFEL_013 */; } __host__ __device__ float christoffel_020(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_020 */ 0 /* CHRISTOFFEL_020 */; } __host__ __device__ float christoffel_021(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_021 */ 0 /* CHRISTOFFEL_021 */; } __host__ __device__ float christoffel_022(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_022 */ (3.0/8.0)*x_0/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/8.0*x_1/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/4.0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1) /* CHRISTOFFEL_022 */; } __host__ __device__ float christoffel_023(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_023 */ 0 /* CHRISTOFFEL_023 */; } __host__ __device__ float christoffel_030(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_030 */ 0 /* CHRISTOFFEL_030 */; } __host__ __device__ float christoffel_031(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_031 */ 0 /* CHRISTOFFEL_031 */; } __host__ __device__ float christoffel_032(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_032 */ 0 /* CHRISTOFFEL_032 */; } __host__ __device__ float christoffel_033(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_033 */ (3.0/8.0)*x_0*::pow(std::sin(x_2), 2)/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 
3.0/8.0*x_1*::pow(std::sin(x_2), 2)/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/4.0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2) /* CHRISTOFFEL_033 */; } __host__ __device__ float christoffel_100(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_100 */ 0 /* CHRISTOFFEL_100 */; } __host__ __device__ float christoffel_101(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_101 */ (1.0/2.0)/(-3.0/2.0*x_0 + (3.0/2.0)*x_1) /* CHRISTOFFEL_101 */; } __host__ __device__ float christoffel_102(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_102 */ 0 /* CHRISTOFFEL_102 */; } __host__ __device__ float christoffel_103(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_103 */ 0 /* CHRISTOFFEL_103 */; } __host__ __device__ float christoffel_110(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_110 */ (1.0/2.0)/(-3.0/2.0*x_0 + (3.0/2.0)*x_1) /* CHRISTOFFEL_110 */; } __host__ __device__ float christoffel_111(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_111 */ -1.0/2.0/(-3.0/2.0*x_0 + (3.0/2.0)*x_1) /* CHRISTOFFEL_111 */; } __host__ __device__ float christoffel_112(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_112 */ 0 /* CHRISTOFFEL_112 */; } __host__ __device__ float christoffel_113(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_113 */ 0 /* CHRISTOFFEL_113 */; } __host__ __device__ float christoffel_120(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_120 */ 0 /* CHRISTOFFEL_120 */; } __host__ __device__ float christoffel_121(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_121 */ 0 /* CHRISTOFFEL_121 */; } __host__ __device__ float christoffel_122(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_122 */ -::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0)*(-3.0/8.0*x_0/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/8.0)*x_1/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/4.0)*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)) /* CHRISTOFFEL_122 */; } __host__ __device__ float christoffel_123(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_123 */ 0 /* CHRISTOFFEL_123 */; } __host__ __device__ float christoffel_130(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_130 */ 0 /* CHRISTOFFEL_130 */; } __host__ __device__ float christoffel_131(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_131 */ 0 /* CHRISTOFFEL_131 */; } __host__ __device__ float christoffel_132(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_132 */ 0 /* CHRISTOFFEL_132 */; } __host__ __device__ float christoffel_133(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_133 */ -::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0)*(-3.0/8.0*x_0*::pow(std::sin(x_2), 2)/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/8.0)*x_1*::pow(std::sin(x_2), 2)/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/4.0)*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2)) /* CHRISTOFFEL_133 */; } __host__ __device__ float christoffel_200(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_200 */ 0 /* CHRISTOFFEL_200 */; } __host__ __device__ float christoffel_201(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_201 */ 0 /* CHRISTOFFEL_201 */; } __host__ __device__ float christoffel_202(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_202 */ (-3.0/8.0*x_0/::pow(-3.0/2.0*x_0 + 
(3.0/2.0)*x_1, 2.0/3.0) + (3.0/8.0)*x_1/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/4.0)*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)) /* CHRISTOFFEL_202 */; } __host__ __device__ float christoffel_203(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_203 */ 0 /* CHRISTOFFEL_203 */; } __host__ __device__ float christoffel_210(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_210 */ 0 /* CHRISTOFFEL_210 */; } __host__ __device__ float christoffel_211(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_211 */ 0 /* CHRISTOFFEL_211 */; } __host__ __device__ float christoffel_212(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_212 */ ((3.0/8.0)*x_0/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/8.0*x_1/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/4.0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)) /* CHRISTOFFEL_212 */; } __host__ __device__ float christoffel_213(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_213 */ 0 /* CHRISTOFFEL_213 */; } __host__ __device__ float christoffel_220(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_220 */ (-3.0/8.0*x_0/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/8.0)*x_1/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/4.0)*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)) /* CHRISTOFFEL_220 */; } __host__ __device__ float christoffel_221(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_221 */ ((3.0/8.0)*x_0/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/8.0*x_1/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/4.0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)) /* CHRISTOFFEL_221 */; } __host__ __device__ float christoffel_222(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_222 */ 0 /* CHRISTOFFEL_222 */; } __host__ __device__ float christoffel_223(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_223 */ 0 /* CHRISTOFFEL_223 */; } __host__ __device__ float christoffel_230(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_230 */ 0 /* CHRISTOFFEL_230 */; } __host__ __device__ float christoffel_231(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_231 */ 0 /* CHRISTOFFEL_231 */; } __host__ __device__ float christoffel_232(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_232 */ 0 /* CHRISTOFFEL_232 */; } __host__ __device__ float christoffel_233(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_233 */ (-3.0/2.0*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::sin(x_2)*std::cos(x_2) + (3.0/2.0)*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::sin(x_2)*std::cos(x_2))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)) /* CHRISTOFFEL_233 */; } __host__ __device__ float christoffel_300(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_300 */ 0 /* CHRISTOFFEL_300 */; } __host__ __device__ float christoffel_301(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_301 */ 0 /* CHRISTOFFEL_301 */; } __host__ 
__device__ float christoffel_302(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_302 */ 0 /* CHRISTOFFEL_302 */; } __host__ __device__ float christoffel_303(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_303 */ (-3.0/8.0*x_0*::pow(std::sin(x_2), 2)/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/8.0)*x_1*::pow(std::sin(x_2), 2)/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/4.0)*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2)) /* CHRISTOFFEL_303 */; } __host__ __device__ float christoffel_310(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_310 */ 0 /* CHRISTOFFEL_310 */; } __host__ __device__ float christoffel_311(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_311 */ 0 /* CHRISTOFFEL_311 */; } __host__ __device__ float christoffel_312(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_312 */ 0 /* CHRISTOFFEL_312 */; } __host__ __device__ float christoffel_313(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_313 */ ((3.0/8.0)*x_0*::pow(std::sin(x_2), 2)/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/8.0*x_1*::pow(std::sin(x_2), 2)/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/4.0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2)) /* CHRISTOFFEL_313 */; } __host__ __device__ float christoffel_320(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_320 */ 0 /* CHRISTOFFEL_320 */; } __host__ __device__ float christoffel_321(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_321 */ 0 /* CHRISTOFFEL_321 */; } __host__ __device__ float christoffel_322(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_322 */ 0 /* CHRISTOFFEL_322 */; } __host__ __device__ float christoffel_323(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_323 */ ((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::sin(x_2)*std::cos(x_2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::sin(x_2)*std::cos(x_2))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2)) /* CHRISTOFFEL_323 */; } __host__ __device__ float christoffel_330(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_330 */ (-3.0/8.0*x_0*::pow(std::sin(x_2), 2)/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/8.0)*x_1*::pow(std::sin(x_2), 2)/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/4.0)*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2)) /* CHRISTOFFEL_330 */; } __host__ __device__ float christoffel_331(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_331 */ ((3.0/8.0)*x_0*::pow(std::sin(x_2), 2)/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/8.0*x_1*::pow(std::sin(x_2), 2)/::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/4.0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2) - 
3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2)) /* CHRISTOFFEL_331 */; } __host__ __device__ float christoffel_332(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_332 */ ((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::sin(x_2)*std::cos(x_2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::sin(x_2)*std::cos(x_2))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*::pow(std::sin(x_2), 2)) /* CHRISTOFFEL_332 */; } __host__ __device__ float christoffel_333(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_333 */ 0 /* CHRISTOFFEL_333 */; } // Main christoffel function that returns the others depending on the index parameters i, j, k __host__ __device__ float christoffel(float x[4], int i, int j, int k) { float x_0 = x[0]; float x_1 = x[1]; float x_2 = x[2]; float x_3 = x[3]; switch (i) { default: return 0; case 0: switch (j) { default: return 0; case 0: switch (k) { default: return 0; case 0: return christoffel_000(x_0, x_1, x_2, x_3); break; case 1: return christoffel_001(x_0, x_1, x_2, x_3); break; case 2: return christoffel_002(x_0, x_1, x_2, x_3); break; case 3: return christoffel_003(x_0, x_1, x_2, x_3); break; } case 1: switch (k) { default: return 0; case 0: return christoffel_010(x_0, x_1, x_2, x_3); break; case 1: return christoffel_011(x_0, x_1, x_2, x_3); break; case 2: return christoffel_012(x_0, x_1, x_2, x_3); break; case 3: return christoffel_013(x_0, x_1, x_2, x_3); break; } case 2: switch (k) { default: return 0; case 0: return christoffel_020(x_0, x_1, x_2, x_3); break; case 1: return christoffel_021(x_0, x_1, x_2, x_3); break; case 2: return christoffel_022(x_0, x_1, x_2, x_3); break; case 3: return christoffel_023(x_0, x_1, x_2, x_3); break; } case 3: switch (k) { default: return 0; case 0: return christoffel_030(x_0, x_1, x_2, x_3); break; case 1: return christoffel_031(x_0, x_1, x_2, x_3); break; case 2: return christoffel_032(x_0, x_1, x_2, x_3); break; case 3: return christoffel_033(x_0, x_1, x_2, x_3); break; } } case 1: switch (j) { default: return 0; case 0: switch (k) { default: return 0; case 0: return christoffel_100(x_0, x_1, x_2, x_3); break; case 1: return christoffel_101(x_0, x_1, x_2, x_3); break; case 2: return christoffel_102(x_0, x_1, x_2, x_3); break; case 3: return christoffel_103(x_0, x_1, x_2, x_3); break; } case 1: switch (k) { default: return 0; case 0: return christoffel_110(x_0, x_1, x_2, x_3); break; case 1: return christoffel_111(x_0, x_1, x_2, x_3); break; case 2: return christoffel_112(x_0, x_1, x_2, x_3); break; case 3: return christoffel_113(x_0, x_1, x_2, x_3); break; } case 2: switch (k) { default: return 0; case 0: return christoffel_120(x_0, x_1, x_2, x_3); break; case 1: return christoffel_121(x_0, x_1, x_2, x_3); break; case 2: return christoffel_122(x_0, x_1, x_2, x_3); break; case 3: return christoffel_123(x_0, x_1, x_2, x_3); break; } case 3: switch (k) { default: return 0; case 0: return christoffel_130(x_0, x_1, x_2, x_3); break; case 1: return christoffel_131(x_0, x_1, x_2, x_3); break; case 2: return christoffel_132(x_0, x_1, x_2, x_3); break; case 3: return christoffel_133(x_0, x_1, x_2, x_3); break; } } case 2: switch (j) { default: return 0; case 0: switch (k) { default: return 0; case 0: return christoffel_200(x_0, x_1, x_2, x_3); break; case 1: return christoffel_201(x_0, x_1, x_2, x_3); break; case 2: return christoffel_202(x_0, x_1, x_2, x_3); break; case 
3: return christoffel_203(x_0, x_1, x_2, x_3); break; } case 1: switch (k) { default: return 0; case 0: return christoffel_210(x_0, x_1, x_2, x_3); break; case 1: return christoffel_211(x_0, x_1, x_2, x_3); break; case 2: return christoffel_212(x_0, x_1, x_2, x_3); break; case 3: return christoffel_213(x_0, x_1, x_2, x_3); break; } case 2: switch (k) { default: return 0; case 0: return christoffel_220(x_0, x_1, x_2, x_3); break; case 1: return christoffel_221(x_0, x_1, x_2, x_3); break; case 2: return christoffel_222(x_0, x_1, x_2, x_3); break; case 3: return christoffel_223(x_0, x_1, x_2, x_3); break; } case 3: switch (k) { default: return 0; case 0: return christoffel_230(x_0, x_1, x_2, x_3); break; case 1: return christoffel_231(x_0, x_1, x_2, x_3); break; case 2: return christoffel_232(x_0, x_1, x_2, x_3); break; case 3: return christoffel_233(x_0, x_1, x_2, x_3); break; } } case 3: switch (j) { default: return 0; case 0: switch (k) { default: return 0; case 0: return christoffel_300(x_0, x_1, x_2, x_3); break; case 1: return christoffel_301(x_0, x_1, x_2, x_3); break; case 2: return christoffel_302(x_0, x_1, x_2, x_3); break; case 3: return christoffel_303(x_0, x_1, x_2, x_3); break; } case 1: switch (k) { default: return 0; case 0: return christoffel_310(x_0, x_1, x_2, x_3); break; case 1: return christoffel_311(x_0, x_1, x_2, x_3); break; case 2: return christoffel_312(x_0, x_1, x_2, x_3); break; case 3: return christoffel_313(x_0, x_1, x_2, x_3); break; } case 2: switch (k) { default: return 0; case 0: return christoffel_320(x_0, x_1, x_2, x_3); break; case 1: return christoffel_321(x_0, x_1, x_2, x_3); break; case 2: return christoffel_322(x_0, x_1, x_2, x_3); break; case 3: return christoffel_323(x_0, x_1, x_2, x_3); break; } case 3: switch (k) { default: return 0; case 0: return christoffel_330(x_0, x_1, x_2, x_3); break; case 1: return christoffel_331(x_0, x_1, x_2, x_3); break; case 2: return christoffel_332(x_0, x_1, x_2, x_3); break; case 3: return christoffel_333(x_0, x_1, x_2, x_3); break; } } } return 0; } // Main Equation function that takes input of the 4-position and 4-velocity and returns the results of the geodesic equations __host__ __device__ float *Equation(float c_F0[8], float *c_dudv) { // Initializes u, v, du, and dv float u[4] = { c_F0[0], c_F0[1], c_F0[2], c_F0[3] }; float v[4] = { c_F0[4], c_F0[5], c_F0[6], c_F0[7] }; float du[4] = { v[0], v[1], v[2], v[3] }; float dv[4] = { 0, 0, 0, 0 }; // Sets respective values of dv for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { for (int k = 0; k < 4; k++) { dv[i] -= christoffel(u, i, j, k) * v[j] * v[k]; } } } // Sets values of dudv c_dudv[0] = du[0]; c_dudv[1] = du[1]; c_dudv[2] = du[2]; c_dudv[3] = du[3]; c_dudv[4] = dv[0]; c_dudv[5] = dv[1]; c_dudv[6] = dv[2]; c_dudv[7] = dv[3]; // Returns resulting dudv return c_dudv; } // Main RK4 function // This function is ran directly on the GPU and solves the geodesic equations using the specific inputs // F0 - 4-position and 4-velocity // dudv - pointer returned from the Equation function // S - pointer returned by this function // X - pointer returned by this function storing the solved data // a_bound, b_bound - bounds of the equation // nstep - number of steps to partition the bounds into __global__ void RK4(float *c_F0, float *c_dudv, float* c_S, float* c_X, float a_bound, float b_bound, size_t nstep) { // Gets offset of array indexes depending on thread and block number size_t offset = 8 * threadIdx.x + 8 * blockIdx.x * blockDim.x; // Gets starting s and 
step size float s = a_bound; float h = (b_bound - a_bound)/nstep; // Gets starting position and velocity float pos[4] = { c_F0[0 + offset], c_F0[1 + offset], c_F0[2 + offset], c_F0[3 + offset] }; float vel[4] = { c_F0[4 + offset], c_F0[5 + offset], c_F0[6 + offset], c_F0[7 + offset] }; // Initializes Y and Y_tmp and sets equal to starting pos and vel float Y[8]; float Y_tmp[8]; for (int i = 0; i < 8; i++) { Y[i] = c_F0[i + offset]; } // Sets starting S and X value in the output arrays c_S[nstep * threadIdx.x + nstep * blockIdx.x * blockDim.x] = s; for (int i = 0; i < 8; i++) { c_X[i + offset * nstep] = c_F0[i + offset]; } // Main RK4 for loop for (int step = 0; step < nstep; step++) { //Creates temporary arrays for the loop for (int i = 0; i < 4; i++) { Y[i] = pos[i]; Y[i + 4] = vel[i]; } __syncthreads(); printf("Step number: %d \r", step); // Butcher Table for RK4: // 0.0 | // 0.5 | 0.5 // 0.5 | 0.0 0.5 // 1.0 | 0.0 0.0 1.0 // +---------------------- // 1/6 1/3 1/3 1/6 // Defines the weights used for the method // Sets weights and evaluates k1 for (int i = 0; i < 8; i++) { Y_tmp[i] = Y[i]; } float* k1 = Equation(Y_tmp, c_dudv); // Sets weights and evaluates k2 for (int i = 0; i < 8; i++) { Y_tmp[i] = Y[i] + 0.5 * h * k1[i]; } float* k2 = Equation(Y_tmp, c_dudv); // Sets weights and evaluates k3 for (int i = 0; i < 8; i++) { Y_tmp[i] = Y[i] + 0.5 * h * k2[i]; } float* k3 = Equation(Y_tmp, c_dudv); // Sets weights and evaluates k4 for (int i = 0; i < 8; i++) { Y_tmp[i] = Y[i] + 1.0 * h * k3[i]; } float* k4 = Equation(Y_tmp, c_dudv); // Initializes delta to the weighted sum of the four k values float delta[8]; for (int i = 0; i < 8; i++) { delta[i] = h / 6.0 * (k1[i] + 2.0 * k2[i] + 2.0 * k3[i] + k4[i]); } // Adds delta to the position and velocity for (int i = 0; i < 4; i++) { pos[i] += delta[i]; vel[i] += delta[i + 4]; } // Sets next S value c_S[step + nstep * threadIdx.x + nstep * blockIdx.x * blockDim.x] = (step+1) * h; // Stores solved positions and velocities into the X array for (int i = 0; i < 4; i++) { c_X[i + 8 * step + offset * nstep] = pos[i]; c_X[i + 4 + 8 * step + offset * nstep] = vel[i]; } } } // Struct to store the S and X vectors and creates a type return struct S_X { std::vector<std::vector<float>> S; std::vector<std::vector<std::vector<float>>> X; }; typedef struct S_X S_X_t; // Main callable function for the cuda file // F0 - Vector contatinging all starting pos and vel // a_bound, b_bound - Bounds of solver // nstep - Number of steps // threadsPerBlock - Number of threads per blocks on the GPU grid MAX: (GTX 1060: 1024, RTX 2070: 1024) S_X_t cuda_main(std::vector<std::vector<float>> F0, float a_bound, float b_bound, size_t nstep, int threadsPerBlock) { // Sets size and num variables, num is number of input values size_t size = 8 * sizeof(float); size_t num = F0.size(); // Creates pointers to all host variables float* h_F0 = new float[8 * num]; float* h_dudv = new float[8]; float* h_S = new float[nstep * num]; float* h_X = new float[8 * nstep * num]; // Initializes host F0 to values of vector F0 for (int i = 0; i < num; i++) { for (int j = 0; j < 8; j++) { h_F0[j + 8 * i] = F0[i][j]; } } // Initializes all values of host dudv to 0 for (int i = 0; i < 8; i++) { h_dudv[i] = 0; } // Creates cuda F0 and allocates to GPU memory float* c_F0 = nullptr; hipMalloc((void**)&c_F0, size * num); // Creates cuda dudv and allocates to GPU memory float* c_dudv = nullptr; hipMalloc((void**)&c_dudv, size); // Creates cuda S and allocates to GPU memory float* c_S = nullptr; 
hipMalloc((void**)&c_S, nstep * num * sizeof(float)); // Creates cuda X and allocates to GPU memory float* c_X = nullptr; hipMalloc((void**)&c_X, size * nstep * num); // Copies values from host F0 to cuda F0 hipMemcpy(c_F0, h_F0, size * num, hipMemcpyHostToDevice); // Calculates blocks per grid depending on number of threads and size of input int blocksPerGrid = num / threadsPerBlock; // Runs the RK4 method on the GPU hipLaunchKernelGGL(( RK4) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, c_F0, c_dudv, c_S, c_X, a_bound, b_bound, nstep); // Waits for all threads to finish hipDeviceSynchronize(); // Copies cuda dudv, S, and X to host dudv, S, and X hipMemcpy(h_dudv, c_dudv, size, hipMemcpyDeviceToHost); hipMemcpy(h_S, c_S, nstep * num * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(h_X, c_X, size * nstep * num, hipMemcpyDeviceToHost); // Frees cuda F0, dudv, S, and X from GPU memory hipFree(c_F0); hipFree(c_dudv); hipFree(c_S); hipFree(c_X); // Creates S and X vectors std::vector<std::vector<float>> S(num, std::vector<float>(nstep)); std::vector<std::vector<std::vector<float>>> X(num, std::vector<std::vector<float>>(nstep, std::vector<float>(8))); // Sets respective values for S and X vectors from host S and X arrays for (int i = 0; i < num; i++) { for (int j = 0; j < nstep; j++) { for (int k = 0; k < 8; k++) { X[i][j][k] = h_X[k + 8 * j + 8 * nstep * i]; } S[i][j] = h_S[j + nstep * i]; } } // Deletes host F0, dudv, S, and X arrays delete[] h_F0; delete[] h_dudv; delete[] h_S; delete[] h_X; // Creates output struct S_X_t SX; // Sets to respective vectors SX.S = S; SX.X = X; // Returns data return SX; }
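// --- Usage sketch (not part of the original file) ---
// A minimal host-side call of cuda_main as defined above, assuming module.h declares S_X_t and
// cuda_main (both <vector> and module.h are already included at the top of this file). All numeric
// values below are illustrative placeholders, not values taken from the project. Note that cuda_main
// launches num / threadsPerBlock blocks with plain integer division, so the number of initial
// conditions is kept a multiple of threadsPerBlock here. The function name example_geodesic_run is
// hypothetical.
int example_geodesic_run() {
    // Two trajectories, each given as {x0, x1, x2, x3, v0, v1, v2, v3}
    std::vector<std::vector<float>> F0 = {
        { 0.0f, 2.0f, 1.0f, 0.0f,   1.0f, 0.0f, 0.0f, 0.1f },
        { 0.0f, 3.0f, 1.0f, 0.0f,   1.0f, 0.0f, 0.0f, 0.1f }
    };
    // Integrate from s = 0 to s = 1 in 1000 steps, with 2 threads per block for this toy input
    S_X_t result = cuda_main(F0, 0.0f, 1.0f, 1000, 2);
    // result.X[i][j] holds the 8 state components of trajectory i as filled in by RK4 at step j,
    // and result.S[i][j] the corresponding value of the curve parameter.
    return (int)result.X.size();
}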
a4024f78cd5a3d2ecd4b49fd8735c5c43c07fa40.cu
/* This is the file, written in C++ that is compiled into GPU machine code Contains the functions: __global__ double* RK4 __host__ __device__ double* Equation __host__ __device__ double* christoffel Struct main_cuda The main C++ library, Geodesic, calls this file to run the RK4 solving in parallel on the CUDA GPU Python, at runtime, compiles this file if the christoffel symbols are edited */ // CUDA-specific includes #include "cuda.h" #include "cuda_runtime.h" #include "cuda_runtime_api.h" #include "device_launch_parameters.h" #include "device_functions.h" // General includes #include <stdio.h> #include <iostream> #include <math.h> #include <vector> // Includes main C++ header #include "module.h" // 64 different functions that store the christoffel symbols across a 4x4x4 matrix // These functions are written with comments so that python can find the specific ones __host__ __device__ float christoffel_000(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_000 */ 0 /* CHRISTOFFEL_000 */; } __host__ __device__ float christoffel_001(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_001 */ 0 /* CHRISTOFFEL_001 */; } __host__ __device__ float christoffel_002(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_002 */ 0 /* CHRISTOFFEL_002 */; } __host__ __device__ float christoffel_003(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_003 */ 0 /* CHRISTOFFEL_003 */; } __host__ __device__ float christoffel_010(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_010 */ 0 /* CHRISTOFFEL_010 */; } __host__ __device__ float christoffel_011(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_011 */ (1.0/2.0)/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 5.0/3.0) /* CHRISTOFFEL_011 */; } __host__ __device__ float christoffel_012(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_012 */ 0 /* CHRISTOFFEL_012 */; } __host__ __device__ float christoffel_013(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_013 */ 0 /* CHRISTOFFEL_013 */; } __host__ __device__ float christoffel_020(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_020 */ 0 /* CHRISTOFFEL_020 */; } __host__ __device__ float christoffel_021(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_021 */ 0 /* CHRISTOFFEL_021 */; } __host__ __device__ float christoffel_022(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_022 */ (3.0/8.0)*x_0/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/8.0*x_1/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/4.0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1) /* CHRISTOFFEL_022 */; } __host__ __device__ float christoffel_023(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_023 */ 0 /* CHRISTOFFEL_023 */; } __host__ __device__ float christoffel_030(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_030 */ 0 /* CHRISTOFFEL_030 */; } __host__ __device__ float christoffel_031(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_031 */ 0 /* CHRISTOFFEL_031 */; } __host__ __device__ float christoffel_032(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_032 */ 0 /* CHRISTOFFEL_032 */; } __host__ __device__ float christoffel_033(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_033 */ (3.0/8.0)*x_0*std::pow(std::sin(x_2), 2)/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/8.0*x_1*std::pow(std::sin(x_2), 2)/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 
2.0/3.0) - 3.0/4.0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2) /* CHRISTOFFEL_033 */; } __host__ __device__ float christoffel_100(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_100 */ 0 /* CHRISTOFFEL_100 */; } __host__ __device__ float christoffel_101(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_101 */ (1.0/2.0)/(-3.0/2.0*x_0 + (3.0/2.0)*x_1) /* CHRISTOFFEL_101 */; } __host__ __device__ float christoffel_102(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_102 */ 0 /* CHRISTOFFEL_102 */; } __host__ __device__ float christoffel_103(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_103 */ 0 /* CHRISTOFFEL_103 */; } __host__ __device__ float christoffel_110(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_110 */ (1.0/2.0)/(-3.0/2.0*x_0 + (3.0/2.0)*x_1) /* CHRISTOFFEL_110 */; } __host__ __device__ float christoffel_111(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_111 */ -1.0/2.0/(-3.0/2.0*x_0 + (3.0/2.0)*x_1) /* CHRISTOFFEL_111 */; } __host__ __device__ float christoffel_112(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_112 */ 0 /* CHRISTOFFEL_112 */; } __host__ __device__ float christoffel_113(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_113 */ 0 /* CHRISTOFFEL_113 */; } __host__ __device__ float christoffel_120(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_120 */ 0 /* CHRISTOFFEL_120 */; } __host__ __device__ float christoffel_121(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_121 */ 0 /* CHRISTOFFEL_121 */; } __host__ __device__ float christoffel_122(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_122 */ -std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0)*(-3.0/8.0*x_0/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/8.0)*x_1/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/4.0)*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)) /* CHRISTOFFEL_122 */; } __host__ __device__ float christoffel_123(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_123 */ 0 /* CHRISTOFFEL_123 */; } __host__ __device__ float christoffel_130(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_130 */ 0 /* CHRISTOFFEL_130 */; } __host__ __device__ float christoffel_131(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_131 */ 0 /* CHRISTOFFEL_131 */; } __host__ __device__ float christoffel_132(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_132 */ 0 /* CHRISTOFFEL_132 */; } __host__ __device__ float christoffel_133(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_133 */ -std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0)*(-3.0/8.0*x_0*std::pow(std::sin(x_2), 2)/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/8.0)*x_1*std::pow(std::sin(x_2), 2)/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/4.0)*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2)) /* CHRISTOFFEL_133 */; } __host__ __device__ float christoffel_200(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_200 */ 0 /* CHRISTOFFEL_200 */; } __host__ __device__ float christoffel_201(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_201 */ 0 /* CHRISTOFFEL_201 */; } __host__ __device__ float christoffel_202(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_202 */ (-3.0/8.0*x_0/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + 
(3.0/8.0)*x_1/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/4.0)*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)) /* CHRISTOFFEL_202 */; } __host__ __device__ float christoffel_203(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_203 */ 0 /* CHRISTOFFEL_203 */; } __host__ __device__ float christoffel_210(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_210 */ 0 /* CHRISTOFFEL_210 */; } __host__ __device__ float christoffel_211(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_211 */ 0 /* CHRISTOFFEL_211 */; } __host__ __device__ float christoffel_212(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_212 */ ((3.0/8.0)*x_0/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/8.0*x_1/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/4.0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)) /* CHRISTOFFEL_212 */; } __host__ __device__ float christoffel_213(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_213 */ 0 /* CHRISTOFFEL_213 */; } __host__ __device__ float christoffel_220(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_220 */ (-3.0/8.0*x_0/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/8.0)*x_1/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/4.0)*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)) /* CHRISTOFFEL_220 */; } __host__ __device__ float christoffel_221(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_221 */ ((3.0/8.0)*x_0/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/8.0*x_1/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/4.0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)) /* CHRISTOFFEL_221 */; } __host__ __device__ float christoffel_222(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_222 */ 0 /* CHRISTOFFEL_222 */; } __host__ __device__ float christoffel_223(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_223 */ 0 /* CHRISTOFFEL_223 */; } __host__ __device__ float christoffel_230(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_230 */ 0 /* CHRISTOFFEL_230 */; } __host__ __device__ float christoffel_231(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_231 */ 0 /* CHRISTOFFEL_231 */; } __host__ __device__ float christoffel_232(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_232 */ 0 /* CHRISTOFFEL_232 */; } __host__ __device__ float christoffel_233(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_233 */ (-3.0/2.0*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::sin(x_2)*std::cos(x_2) + (3.0/2.0)*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::sin(x_2)*std::cos(x_2))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)) /* CHRISTOFFEL_233 */; } __host__ __device__ float christoffel_300(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_300 */ 0 /* CHRISTOFFEL_300 */; } __host__ __device__ float christoffel_301(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_301 */ 0 /* CHRISTOFFEL_301 */; } __host__ 
__device__ float christoffel_302(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_302 */ 0 /* CHRISTOFFEL_302 */; } __host__ __device__ float christoffel_303(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_303 */ (-3.0/8.0*x_0*std::pow(std::sin(x_2), 2)/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/8.0)*x_1*std::pow(std::sin(x_2), 2)/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/4.0)*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2)) /* CHRISTOFFEL_303 */; } __host__ __device__ float christoffel_310(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_310 */ 0 /* CHRISTOFFEL_310 */; } __host__ __device__ float christoffel_311(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_311 */ 0 /* CHRISTOFFEL_311 */; } __host__ __device__ float christoffel_312(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_312 */ 0 /* CHRISTOFFEL_312 */; } __host__ __device__ float christoffel_313(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_313 */ ((3.0/8.0)*x_0*std::pow(std::sin(x_2), 2)/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/8.0*x_1*std::pow(std::sin(x_2), 2)/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/4.0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2)) /* CHRISTOFFEL_313 */; } __host__ __device__ float christoffel_320(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_320 */ 0 /* CHRISTOFFEL_320 */; } __host__ __device__ float christoffel_321(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_321 */ 0 /* CHRISTOFFEL_321 */; } __host__ __device__ float christoffel_322(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_322 */ 0 /* CHRISTOFFEL_322 */; } __host__ __device__ float christoffel_323(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_323 */ ((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::sin(x_2)*std::cos(x_2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::sin(x_2)*std::cos(x_2))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2)) /* CHRISTOFFEL_323 */; } __host__ __device__ float christoffel_330(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_330 */ (-3.0/8.0*x_0*std::pow(std::sin(x_2), 2)/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/8.0)*x_1*std::pow(std::sin(x_2), 2)/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) + (3.0/4.0)*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2)) /* CHRISTOFFEL_330 */; } __host__ __device__ float christoffel_331(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_331 */ ((3.0/8.0)*x_0*std::pow(std::sin(x_2), 2)/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/8.0*x_1*std::pow(std::sin(x_2), 2)/std::pow(-3.0/2.0*x_0 + (3.0/2.0)*x_1, 2.0/3.0) - 3.0/4.0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 
2))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2)) /* CHRISTOFFEL_331 */; } __host__ __device__ float christoffel_332(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_332 */ ((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::sin(x_2)*std::cos(x_2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::sin(x_2)*std::cos(x_2))/((3.0/2.0)*x_0*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2) - 3.0/2.0*x_1*std::cbrt(-3.0/2.0*x_0 + (3.0/2.0)*x_1)*std::pow(std::sin(x_2), 2)) /* CHRISTOFFEL_332 */; } __host__ __device__ float christoffel_333(float x_0, float x_1, float x_2, float x_3) { return /* CHRISTOFFEL_333 */ 0 /* CHRISTOFFEL_333 */; } // Main christoffel function that returns the others depending on the index parameters i, j, k __host__ __device__ float christoffel(float x[4], int i, int j, int k) { float x_0 = x[0]; float x_1 = x[1]; float x_2 = x[2]; float x_3 = x[3]; switch (i) { default: return 0; case 0: switch (j) { default: return 0; case 0: switch (k) { default: return 0; case 0: return christoffel_000(x_0, x_1, x_2, x_3); break; case 1: return christoffel_001(x_0, x_1, x_2, x_3); break; case 2: return christoffel_002(x_0, x_1, x_2, x_3); break; case 3: return christoffel_003(x_0, x_1, x_2, x_3); break; } case 1: switch (k) { default: return 0; case 0: return christoffel_010(x_0, x_1, x_2, x_3); break; case 1: return christoffel_011(x_0, x_1, x_2, x_3); break; case 2: return christoffel_012(x_0, x_1, x_2, x_3); break; case 3: return christoffel_013(x_0, x_1, x_2, x_3); break; } case 2: switch (k) { default: return 0; case 0: return christoffel_020(x_0, x_1, x_2, x_3); break; case 1: return christoffel_021(x_0, x_1, x_2, x_3); break; case 2: return christoffel_022(x_0, x_1, x_2, x_3); break; case 3: return christoffel_023(x_0, x_1, x_2, x_3); break; } case 3: switch (k) { default: return 0; case 0: return christoffel_030(x_0, x_1, x_2, x_3); break; case 1: return christoffel_031(x_0, x_1, x_2, x_3); break; case 2: return christoffel_032(x_0, x_1, x_2, x_3); break; case 3: return christoffel_033(x_0, x_1, x_2, x_3); break; } } case 1: switch (j) { default: return 0; case 0: switch (k) { default: return 0; case 0: return christoffel_100(x_0, x_1, x_2, x_3); break; case 1: return christoffel_101(x_0, x_1, x_2, x_3); break; case 2: return christoffel_102(x_0, x_1, x_2, x_3); break; case 3: return christoffel_103(x_0, x_1, x_2, x_3); break; } case 1: switch (k) { default: return 0; case 0: return christoffel_110(x_0, x_1, x_2, x_3); break; case 1: return christoffel_111(x_0, x_1, x_2, x_3); break; case 2: return christoffel_112(x_0, x_1, x_2, x_3); break; case 3: return christoffel_113(x_0, x_1, x_2, x_3); break; } case 2: switch (k) { default: return 0; case 0: return christoffel_120(x_0, x_1, x_2, x_3); break; case 1: return christoffel_121(x_0, x_1, x_2, x_3); break; case 2: return christoffel_122(x_0, x_1, x_2, x_3); break; case 3: return christoffel_123(x_0, x_1, x_2, x_3); break; } case 3: switch (k) { default: return 0; case 0: return christoffel_130(x_0, x_1, x_2, x_3); break; case 1: return christoffel_131(x_0, x_1, x_2, x_3); break; case 2: return christoffel_132(x_0, x_1, x_2, x_3); break; case 3: return christoffel_133(x_0, x_1, x_2, x_3); break; } } case 2: switch (j) { default: return 0; case 0: switch (k) { default: return 0; case 0: return christoffel_200(x_0, x_1, x_2, x_3); break; case 1: return 
christoffel_201(x_0, x_1, x_2, x_3); break; case 2: return christoffel_202(x_0, x_1, x_2, x_3); break; case 3: return christoffel_203(x_0, x_1, x_2, x_3); break; } case 1: switch (k) { default: return 0; case 0: return christoffel_210(x_0, x_1, x_2, x_3); break; case 1: return christoffel_211(x_0, x_1, x_2, x_3); break; case 2: return christoffel_212(x_0, x_1, x_2, x_3); break; case 3: return christoffel_213(x_0, x_1, x_2, x_3); break; } case 2: switch (k) { default: return 0; case 0: return christoffel_220(x_0, x_1, x_2, x_3); break; case 1: return christoffel_221(x_0, x_1, x_2, x_3); break; case 2: return christoffel_222(x_0, x_1, x_2, x_3); break; case 3: return christoffel_223(x_0, x_1, x_2, x_3); break; } case 3: switch (k) { default: return 0; case 0: return christoffel_230(x_0, x_1, x_2, x_3); break; case 1: return christoffel_231(x_0, x_1, x_2, x_3); break; case 2: return christoffel_232(x_0, x_1, x_2, x_3); break; case 3: return christoffel_233(x_0, x_1, x_2, x_3); break; } } case 3: switch (j) { default: return 0; case 0: switch (k) { default: return 0; case 0: return christoffel_300(x_0, x_1, x_2, x_3); break; case 1: return christoffel_301(x_0, x_1, x_2, x_3); break; case 2: return christoffel_302(x_0, x_1, x_2, x_3); break; case 3: return christoffel_303(x_0, x_1, x_2, x_3); break; } case 1: switch (k) { default: return 0; case 0: return christoffel_310(x_0, x_1, x_2, x_3); break; case 1: return christoffel_311(x_0, x_1, x_2, x_3); break; case 2: return christoffel_312(x_0, x_1, x_2, x_3); break; case 3: return christoffel_313(x_0, x_1, x_2, x_3); break; } case 2: switch (k) { default: return 0; case 0: return christoffel_320(x_0, x_1, x_2, x_3); break; case 1: return christoffel_321(x_0, x_1, x_2, x_3); break; case 2: return christoffel_322(x_0, x_1, x_2, x_3); break; case 3: return christoffel_323(x_0, x_1, x_2, x_3); break; } case 3: switch (k) { default: return 0; case 0: return christoffel_330(x_0, x_1, x_2, x_3); break; case 1: return christoffel_331(x_0, x_1, x_2, x_3); break; case 2: return christoffel_332(x_0, x_1, x_2, x_3); break; case 3: return christoffel_333(x_0, x_1, x_2, x_3); break; } } } return 0; } // Main Equation function that takes input of the 4-position and 4-velocity and returns the results of the geodesic equations __host__ __device__ float *Equation(float c_F0[8], float *c_dudv) { // Initializes u, v, du, and dv float u[4] = { c_F0[0], c_F0[1], c_F0[2], c_F0[3] }; float v[4] = { c_F0[4], c_F0[5], c_F0[6], c_F0[7] }; float du[4] = { v[0], v[1], v[2], v[3] }; float dv[4] = { 0, 0, 0, 0 }; // Sets respective values of dv for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { for (int k = 0; k < 4; k++) { dv[i] -= christoffel(u, i, j, k) * v[j] * v[k]; } } } // Sets values of dudv c_dudv[0] = du[0]; c_dudv[1] = du[1]; c_dudv[2] = du[2]; c_dudv[3] = du[3]; c_dudv[4] = dv[0]; c_dudv[5] = dv[1]; c_dudv[6] = dv[2]; c_dudv[7] = dv[3]; // Returns resulting dudv return c_dudv; } // Main RK4 function // This function is ran directly on the GPU and solves the geodesic equations using the specific inputs // F0 - 4-position and 4-velocity // dudv - pointer returned from the Equation function // S - pointer returned by this function // X - pointer returned by this function storing the solved data // a_bound, b_bound - bounds of the equation // nstep - number of steps to partition the bounds into __global__ void RK4(float *c_F0, float *c_dudv, float* c_S, float* c_X, float a_bound, float b_bound, size_t nstep) { // Gets offset of array indexes depending on 
thread and block number size_t offset = 8 * threadIdx.x + 8 * blockIdx.x * blockDim.x; // Gets starting s and step size float s = a_bound; float h = (b_bound - a_bound)/nstep; // Gets starting position and velocity float pos[4] = { c_F0[0 + offset], c_F0[1 + offset], c_F0[2 + offset], c_F0[3 + offset] }; float vel[4] = { c_F0[4 + offset], c_F0[5 + offset], c_F0[6 + offset], c_F0[7 + offset] }; // Initializes Y and Y_tmp and sets equal to starting pos and vel float Y[8]; float Y_tmp[8]; for (int i = 0; i < 8; i++) { Y[i] = c_F0[i + offset]; } // Sets starting S and X value in the output arrays c_S[nstep * threadIdx.x + nstep * blockIdx.x * blockDim.x] = s; for (int i = 0; i < 8; i++) { c_X[i + offset * nstep] = c_F0[i + offset]; } // Main RK4 for loop for (int step = 0; step < nstep; step++) { //Creates temporary arrays for the loop for (int i = 0; i < 4; i++) { Y[i] = pos[i]; Y[i + 4] = vel[i]; } __syncthreads(); printf("Step number: %d \r", step); // Butcher Table for RK4: // 0.0 | // 0.5 | 0.5 // 0.5 | 0.0 0.5 // 1.0 | 0.0 0.0 1.0 // +---------------------- // 1/6 1/3 1/3 1/6 // Defines the weights used for the method // Sets weights and evaluates k1 for (int i = 0; i < 8; i++) { Y_tmp[i] = Y[i]; } float* k1 = Equation(Y_tmp, c_dudv); // Sets weights and evaluates k2 for (int i = 0; i < 8; i++) { Y_tmp[i] = Y[i] + 0.5 * h * k1[i]; } float* k2 = Equation(Y_tmp, c_dudv); // Sets weights and evaluates k3 for (int i = 0; i < 8; i++) { Y_tmp[i] = Y[i] + 0.5 * h * k2[i]; } float* k3 = Equation(Y_tmp, c_dudv); // Sets weights and evaluates k4 for (int i = 0; i < 8; i++) { Y_tmp[i] = Y[i] + 1.0 * h * k3[i]; } float* k4 = Equation(Y_tmp, c_dudv); // Initializes delta to the weighted sum of the four k values float delta[8]; for (int i = 0; i < 8; i++) { delta[i] = h / 6.0 * (k1[i] + 2.0 * k2[i] + 2.0 * k3[i] + k4[i]); } // Adds delta to the position and velocity for (int i = 0; i < 4; i++) { pos[i] += delta[i]; vel[i] += delta[i + 4]; } // Sets next S value c_S[step + nstep * threadIdx.x + nstep * blockIdx.x * blockDim.x] = (step+1) * h; // Stores solved positions and velocities into the X array for (int i = 0; i < 4; i++) { c_X[i + 8 * step + offset * nstep] = pos[i]; c_X[i + 4 + 8 * step + offset * nstep] = vel[i]; } } } // Struct to store the S and X vectors and creates a type return struct S_X { std::vector<std::vector<float>> S; std::vector<std::vector<std::vector<float>>> X; }; typedef struct S_X S_X_t; // Main callable function for the cuda file // F0 - Vector contatinging all starting pos and vel // a_bound, b_bound - Bounds of solver // nstep - Number of steps // threadsPerBlock - Number of threads per blocks on the GPU grid MAX: (GTX 1060: 1024, RTX 2070: 1024) S_X_t cuda_main(std::vector<std::vector<float>> F0, float a_bound, float b_bound, size_t nstep, int threadsPerBlock) { // Sets size and num variables, num is number of input values size_t size = 8 * sizeof(float); size_t num = F0.size(); // Creates pointers to all host variables float* h_F0 = new float[8 * num]; float* h_dudv = new float[8]; float* h_S = new float[nstep * num]; float* h_X = new float[8 * nstep * num]; // Initializes host F0 to values of vector F0 for (int i = 0; i < num; i++) { for (int j = 0; j < 8; j++) { h_F0[j + 8 * i] = F0[i][j]; } } // Initializes all values of host dudv to 0 for (int i = 0; i < 8; i++) { h_dudv[i] = 0; } // Creates cuda F0 and allocates to GPU memory float* c_F0 = nullptr; cudaMalloc((void**)&c_F0, size * num); // Creates cuda dudv and allocates to GPU memory float* c_dudv = nullptr; 
cudaMalloc((void**)&c_dudv, size); // Creates cuda S and allocates to GPU memory float* c_S = nullptr; cudaMalloc((void**)&c_S, nstep * num * sizeof(float)); // Creates cuda X and allocates to GPU memory float* c_X = nullptr; cudaMalloc((void**)&c_X, size * nstep * num); // Copies values from host F0 to cuda F0 cudaMemcpy(c_F0, h_F0, size * num, cudaMemcpyHostToDevice); // Calculates blocks per grid depending on number of threads and size of input int blocksPerGrid = num / threadsPerBlock; // Runs the RK4 method on the GPU RK4 <<<blocksPerGrid, threadsPerBlock>>> (c_F0, c_dudv, c_S, c_X, a_bound, b_bound, nstep); // Waits for all threads to finish cudaDeviceSynchronize(); // Copies cuda dudv, S, and X to host dudv, S, and X cudaMemcpy(h_dudv, c_dudv, size, cudaMemcpyDeviceToHost); cudaMemcpy(h_S, c_S, nstep * num * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(h_X, c_X, size * nstep * num, cudaMemcpyDeviceToHost); // Frees cuda F0, dudv, S, and X from GPU memory cudaFree(c_F0); cudaFree(c_dudv); cudaFree(c_S); cudaFree(c_X); // Creates S and X vectors std::vector<std::vector<float>> S(num, std::vector<float>(nstep)); std::vector<std::vector<std::vector<float>>> X(num, std::vector<std::vector<float>>(nstep, std::vector<float>(8))); // Sets repective values for S and X vectors from host S and X arrays for (int i = 0; i < num; i++) { for (int j = 0; j < nstep; j++) { for (int k = 0; k < 8; k++) { X[i][j][k] = h_X[k + 8 * j + 8 * nstep * i]; } S[i][j] = h_S[j + nstep * i]; } } // Deletes host F0, dudv, S, and X arrays delete[] h_F0; delete[] h_dudv; delete[] h_S; delete[] h_X; // Creates output struct S_X_t SX; // Sets to repective vectors SX.S = S; SX.X = X; // Returns data return SX; }
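// --- RK4 stage-buffer sketch (not part of the original file) ---
// In the RK4 kernel above, Equation always writes into and returns the same c_dudv pointer, so k1,
// k2, k3 and k4 all alias one 8-float global buffer that is also shared by every thread. A common
// alternative, sketched here under the assumption that Equation keeps its current signature, is to
// give each stage its own thread-local array; the name rk4_step_sketch is hypothetical.
__host__ __device__ void rk4_step_sketch(float Y[8], float h) {
    float Y_tmp[8];
    float k1[8], k2[8], k3[8], k4[8];                    // one private buffer per stage
    for (int i = 0; i < 8; i++) Y_tmp[i] = Y[i];
    Equation(Y_tmp, k1);                                 // k1 = f(Y)
    for (int i = 0; i < 8; i++) Y_tmp[i] = Y[i] + 0.5f * h * k1[i];
    Equation(Y_tmp, k2);                                 // k2 = f(Y + h/2 * k1)
    for (int i = 0; i < 8; i++) Y_tmp[i] = Y[i] + 0.5f * h * k2[i];
    Equation(Y_tmp, k3);                                 // k3 = f(Y + h/2 * k2)
    for (int i = 0; i < 8; i++) Y_tmp[i] = Y[i] + h * k3[i];
    Equation(Y_tmp, k4);                                 // k4 = f(Y + h * k3)
    for (int i = 0; i < 8; i++)                          // classic RK4 combination
        Y[i] += h / 6.0f * (k1[i] + 2.0f * k2[i] + 2.0f * k3[i] + k4[i]);
}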
61a9b9e8bc4424b2359d542847c8dc23990c5ccd.hip
// !!! This is a file automatically generated by hipify!!! /* This program is used test how the texture memory is arrange on the global memeory so as to find the fastest access pattern. */ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <assert.h> #include <iostream> #include <string> #include <fstream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <helper_cuda.h> #include <helper_functions.h> #include <helper_math.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <omp.h> #define PIC_WIDTH 1024 #define PIC_HEIGHT 1024 static const int picSize = PIC_WIDTH * PIC_HEIGHT; #define picLayerNum 64 struct texObjtStrut { hipTextureObject_t texAry[picLayerNum]; }; __global__ void readTextureLayerByLayer(float *output, hipTextureObject_t LayerTex){ float ftemp = 0.0f; #pragma unroll for (int layer = 0; layer < picLayerNum; layer++){ for (int row = 0; row < PIC_HEIGHT; row++) { for (int col = 0; col < PIC_WIDTH; col++) { ftemp += tex2DLayered<float>(LayerTex, row, col, layer); } } } *output = ftemp; } __global__ void readTextureDotByDot(float *output, hipTextureObject_t LayerTex){ float ftemp = 0.0f; #pragma unroll for (int row = 0; row < PIC_HEIGHT; row++) { for (int col = 0; col < PIC_WIDTH; col++) { for (int layer = 0; layer < picLayerNum; layer++){ ftemp += tex2DLayered<float>(LayerTex, row, col, layer); } } } *output = ftemp; } __global__ void readTextureObjtByObjt(float *output, texObjtStrut texObjtSet){ float ftemp = 0.0f; #pragma unroll for (int objt = 0; objt < picLayerNum; objt++){ for (int row = 0; row < PIC_HEIGHT; row++) { for (int col = 0; col < PIC_WIDTH; col++) { ftemp += tex2D<float>(texObjtSet.texAry[objt], row, col); } } } *output = ftemp; } int main() { srand(2015); float *pictureSET; checkCudaErrors(hipHostMalloc((void**)&pictureSET, sizeof(float) * picLayerNum * PIC_WIDTH * PIC_HEIGHT, hipHostMallocDefault)); #pragma omp parallel for for (int i = 0; i < picLayerNum * PIC_WIDTH * PIC_HEIGHT; i++){ pictureSET[i] = rand() / (float)RAND_MAX; } float hostmem = 0.0f; hipArray_t tex_buf; hipChannelFormatDesc desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); hipExtent extent; extent.width = PIC_WIDTH; extent.height = PIC_HEIGHT; extent.depth = picLayerNum; checkCudaErrors(hipMalloc3DArray(&tex_buf, &desc, extent, hipArrayLayered)); // generate texture object for reading hipTextureObject_t texInput; hipResourceDesc texRes; memset(&texRes, 0, sizeof(hipResourceDesc)); texRes.resType = hipResourceTypeArray; texRes.res.array.array = tex_buf; hipTextureDesc texDescr; memset(&texDescr, 0, sizeof(hipTextureDesc)); texDescr.normalizedCoords = 0; //Indicates whether texture reads are normalized or not texDescr.filterMode = hipFilterModeLinear; texDescr.addressMode[0] = hipAddressModeClamp; texDescr.addressMode[1] = hipAddressModeClamp; texDescr.addressMode[2] = hipAddressModeClamp; texDescr.readMode = hipReadModeElementType; /**< Read texture as specified element type */ checkCudaErrors(hipCreateTextureObject(&texInput, &texRes, &texDescr, NULL)); /*---------------- for copy data --------------------- */ hipMemcpy3DParms myparms = { 0 }; myparms.srcPos = make_hipPos(0, 0, 0); myparms.dstPos = make_hipPos(0, 0, 0); myparms.srcPtr = make_hipPitchedPtr(pictureSET, PIC_WIDTH * sizeof(float), PIC_WIDTH, PIC_HEIGHT); myparms.dstArray = tex_buf; myparms.extent = make_hipExtent(PIC_WIDTH, PIC_HEIGHT, picLayerNum); myparms.kind = hipMemcpyHostToDevice; 
checkCudaErrors(hipMemcpy3D(&myparms)); float *deviceMem; checkCudaErrors(hipMalloc((void**)&deviceMem, sizeof(float))); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float elapsedTime; dim3 grid_tex(1, 1); dim3 thread_tex(1, 1); hipEventRecord(start, 0); readTextureLayerByLayer << < grid_tex, thread_tex >> > (deviceMem, texInput); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("time used layerbylayer = %0.0f\n", elapsedTime); checkCudaErrors(hipMemcpy(&hostmem, deviceMem, sizeof(float), hipMemcpyDeviceToHost)); printf("result = %f\n", hostmem); hipEventRecord(start, 0); readTextureDotByDot << < grid_tex, thread_tex >> > (deviceMem, texInput); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("time used dotbydot = %0.0f\n", elapsedTime); checkCudaErrors(hipMemcpy(&hostmem, deviceMem, sizeof(float), hipMemcpyDeviceToHost)); printf("result = %f\n", hostmem); checkCudaErrors(hipFreeArray(tex_buf)); checkCudaErrors(hipDestroyTextureObject(texInput)); float* tex_data[picLayerNum]; size_t pitch[picLayerNum]; hipResourceDesc resDesc; hipTextureDesc texDesc; texObjtStrut texContainer; for (int i = 0; i < picLayerNum; i++){ hipMallocPitch(&tex_data[i], &pitch[i], sizeof(float)*PIC_WIDTH, PIC_HEIGHT); memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypePitch2D; resDesc.res.pitch2D.devPtr = tex_data[i]; resDesc.res.pitch2D.pitchInBytes = pitch[i]; resDesc.res.pitch2D.width = PIC_WIDTH; resDesc.res.pitch2D.height = PIC_HEIGHT; resDesc.res.pitch2D.desc = hipCreateChannelDesc<float>(); memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = hipReadModeElementType; texDesc.addressMode[0] =// X axis texDesc.addressMode[1] = hipAddressModeClamp;// Y axis texDesc.filterMode = hipFilterModeLinear; hipCreateTextureObject(&texContainer.texAry[i], &resDesc, &texDesc, NULL); checkCudaErrors(hipMemcpy2D(tex_data[i], pitch[i], &pictureSET[i], sizeof(float) * PIC_WIDTH, sizeof(float) * PIC_WIDTH, PIC_HEIGHT, hipMemcpyHostToDevice)); } hipEventRecord(start, 0); readTextureObjtByObjt << < grid_tex, thread_tex >> > (deviceMem, texContainer); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("time used objtbyobjt = %0.0f\n", elapsedTime); checkCudaErrors(hipMemcpy(&hostmem, deviceMem, sizeof(float), hipMemcpyDeviceToHost)); printf("result = %f\n", hostmem); for (int i = 0; i < picLayerNum; i++){ checkCudaErrors(hipFree(tex_data[i])); checkCudaErrors(hipDestroyTextureObject(texContainer.texAry[i])); } checkCudaErrors(hipHostFree(pictureSET)); hipEventDestroy(start); hipEventDestroy(stop); hipDeviceReset(); return 0; }
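// --- Host-side sanity check (not part of the original file) ---
// A rough CPU reference for the sums printed by the kernels above: accumulate the same pictureSET
// buffer in double precision on the host. Because the texture objects use linear filtering and the
// kernels sample at integer coordinates (which blends neighbouring texels), and because the kernels
// accumulate in float, only approximate agreement with this reference should be expected. The
// function name referenceSum is hypothetical.
static double referenceSum(const float* pictures) {
    double total = 0.0;                                   // double accumulator to limit rounding error
    for (int i = 0; i < picLayerNum * PIC_WIDTH * PIC_HEIGHT; i++) {
        total += pictures[i];
    }
    return total;
}
// e.g. printf("reference = %f\n", referenceSum(pictureSET)); after pictureSET has been filled in main().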
61a9b9e8bc4424b2359d542847c8dc23990c5ccd.cu
/* This program is used test how the texture memory is arrange on the global memeory so as to find the fastest access pattern. */ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <assert.h> #include <iostream> #include <string> #include <fstream> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <helper_cuda.h> #include <helper_functions.h> #include <helper_math.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <omp.h> #define PIC_WIDTH 1024 #define PIC_HEIGHT 1024 static const int picSize = PIC_WIDTH * PIC_HEIGHT; #define picLayerNum 64 struct texObjtStrut { cudaTextureObject_t texAry[picLayerNum]; }; __global__ void readTextureLayerByLayer(float *output, cudaTextureObject_t LayerTex){ float ftemp = 0.0f; #pragma unroll for (int layer = 0; layer < picLayerNum; layer++){ for (int row = 0; row < PIC_HEIGHT; row++) { for (int col = 0; col < PIC_WIDTH; col++) { ftemp += tex2DLayered<float>(LayerTex, row, col, layer); } } } *output = ftemp; } __global__ void readTextureDotByDot(float *output, cudaTextureObject_t LayerTex){ float ftemp = 0.0f; #pragma unroll for (int row = 0; row < PIC_HEIGHT; row++) { for (int col = 0; col < PIC_WIDTH; col++) { for (int layer = 0; layer < picLayerNum; layer++){ ftemp += tex2DLayered<float>(LayerTex, row, col, layer); } } } *output = ftemp; } __global__ void readTextureObjtByObjt(float *output, texObjtStrut texObjtSet){ float ftemp = 0.0f; #pragma unroll for (int objt = 0; objt < picLayerNum; objt++){ for (int row = 0; row < PIC_HEIGHT; row++) { for (int col = 0; col < PIC_WIDTH; col++) { ftemp += tex2D<float>(texObjtSet.texAry[objt], row, col); } } } *output = ftemp; } int main() { srand(2015); float *pictureSET; checkCudaErrors(cudaHostAlloc((void**)&pictureSET, sizeof(float) * picLayerNum * PIC_WIDTH * PIC_HEIGHT, cudaHostAllocDefault)); #pragma omp parallel for for (int i = 0; i < picLayerNum * PIC_WIDTH * PIC_HEIGHT; i++){ pictureSET[i] = rand() / (float)RAND_MAX; } float hostmem = 0.0f; cudaArray_t tex_buf; cudaChannelFormatDesc desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaExtent extent; extent.width = PIC_WIDTH; extent.height = PIC_HEIGHT; extent.depth = picLayerNum; checkCudaErrors(cudaMalloc3DArray(&tex_buf, &desc, extent, cudaArrayLayered)); // generate texture object for reading cudaTextureObject_t texInput; cudaResourceDesc texRes; memset(&texRes, 0, sizeof(cudaResourceDesc)); texRes.resType = cudaResourceTypeArray; texRes.res.array.array = tex_buf; cudaTextureDesc texDescr; memset(&texDescr, 0, sizeof(cudaTextureDesc)); texDescr.normalizedCoords = 0; //Indicates whether texture reads are normalized or not texDescr.filterMode = cudaFilterModeLinear; texDescr.addressMode[0] = cudaAddressModeClamp; texDescr.addressMode[1] = cudaAddressModeClamp; texDescr.addressMode[2] = cudaAddressModeClamp; texDescr.readMode = cudaReadModeElementType; /**< Read texture as specified element type */ checkCudaErrors(cudaCreateTextureObject(&texInput, &texRes, &texDescr, NULL)); /*---------------- for copy data --------------------- */ cudaMemcpy3DParms myparms = { 0 }; myparms.srcPos = make_cudaPos(0, 0, 0); myparms.dstPos = make_cudaPos(0, 0, 0); myparms.srcPtr = make_cudaPitchedPtr(pictureSET, PIC_WIDTH * sizeof(float), PIC_WIDTH, PIC_HEIGHT); myparms.dstArray = tex_buf; myparms.extent = make_cudaExtent(PIC_WIDTH, PIC_HEIGHT, picLayerNum); myparms.kind = cudaMemcpyHostToDevice; checkCudaErrors(cudaMemcpy3D(&myparms)); float 
*deviceMem; checkCudaErrors(cudaMalloc((void**)&deviceMem, sizeof(float))); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float elapsedTime; dim3 grid_tex(1, 1); dim3 thread_tex(1, 1); cudaEventRecord(start, 0); readTextureLayerByLayer << < grid_tex, thread_tex >> > (deviceMem, texInput); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("time used layerbylayer = %0.0f\n", elapsedTime); checkCudaErrors(cudaMemcpy(&hostmem, deviceMem, sizeof(float), cudaMemcpyDeviceToHost)); printf("result = %f\n", hostmem); cudaEventRecord(start, 0); readTextureDotByDot << < grid_tex, thread_tex >> > (deviceMem, texInput); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("time used dotbydot = %0.0f\n", elapsedTime); checkCudaErrors(cudaMemcpy(&hostmem, deviceMem, sizeof(float), cudaMemcpyDeviceToHost)); printf("result = %f\n", hostmem); checkCudaErrors(cudaFreeArray(tex_buf)); checkCudaErrors(cudaDestroyTextureObject(texInput)); float* tex_data[picLayerNum]; size_t pitch[picLayerNum]; cudaResourceDesc resDesc; cudaTextureDesc texDesc; texObjtStrut texContainer; for (int i = 0; i < picLayerNum; i++){ cudaMallocPitch(&tex_data[i], &pitch[i], sizeof(float)*PIC_WIDTH, PIC_HEIGHT); memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypePitch2D; resDesc.res.pitch2D.devPtr = tex_data[i]; resDesc.res.pitch2D.pitchInBytes = pitch[i]; resDesc.res.pitch2D.width = PIC_WIDTH; resDesc.res.pitch2D.height = PIC_HEIGHT; resDesc.res.pitch2D.desc = cudaCreateChannelDesc<float>(); memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = cudaReadModeElementType; texDesc.addressMode[0] =// X axis texDesc.addressMode[1] = cudaAddressModeClamp;// Y axis texDesc.filterMode = cudaFilterModeLinear; cudaCreateTextureObject(&texContainer.texAry[i], &resDesc, &texDesc, NULL); checkCudaErrors(cudaMemcpy2D(tex_data[i], pitch[i], &pictureSET[i], sizeof(float) * PIC_WIDTH, sizeof(float) * PIC_WIDTH, PIC_HEIGHT, cudaMemcpyHostToDevice)); } cudaEventRecord(start, 0); readTextureObjtByObjt << < grid_tex, thread_tex >> > (deviceMem, texContainer); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("time used objtbyobjt = %0.0f\n", elapsedTime); checkCudaErrors(cudaMemcpy(&hostmem, deviceMem, sizeof(float), cudaMemcpyDeviceToHost)); printf("result = %f\n", hostmem); for (int i = 0; i < picLayerNum; i++){ checkCudaErrors(cudaFree(tex_data[i])); checkCudaErrors(cudaDestroyTextureObject(texContainer.texAry[i])); } checkCudaErrors(cudaFreeHost(pictureSET)); cudaEventDestroy(start); cudaEventDestroy(stop); cudaDeviceReset(); return 0; }
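// --- Per-image copy sketch (not part of the original file) ---
// In the per-object loop above, texture object i is filled from &pictureSET[i], i.e. the source
// pointer advances by a single float per object. If the intent is that each texture object holds one
// whole PIC_WIDTH x PIC_HEIGHT image of the packed host buffer, the source would presumably advance
// by a full image per object instead, as in this sketch under that assumption; the helper name
// copyImageToPitched is hypothetical.
static void copyImageToPitched(float* dst, size_t dstPitch, const float* packedHost, int image) {
    const float* src = packedHost + (size_t)image * PIC_WIDTH * PIC_HEIGHT;
    checkCudaErrors(cudaMemcpy2D(dst, dstPitch, src,
                                 sizeof(float) * PIC_WIDTH,   // source pitch: host rows are tightly packed
                                 sizeof(float) * PIC_WIDTH,   // bytes to copy per row
                                 PIC_HEIGHT,
                                 cudaMemcpyHostToDevice));
}
// e.g. copyImageToPitched(tex_data[i], pitch[i], pictureSET, i); in place of the cudaMemcpy2D call
// inside the loop.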
5509749d3d79a34621b3bacff377322c3eff48e8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::conv2d(const Tensor& input, int outChannels, int kernelH, int kernelW, int strideH, int strideW, int paddingH, int paddingW, ActiMode activation, bool use_bias, const Op* shared_op, Initializer* kernel_initializer, Initializer* bias_initializer) { if (kernel_initializer == NULL) { int seed = std::rand(); kernel_initializer = new GlorotUniform(seed); } if (bias_initializer == NULL) { bias_initializer = new ZeroInitializer(); } assert(input.numDim == 4); /*NCHW*/ Conv2D *conv = new Conv2D(*this, input, outChannels, kernelH, kernelW, strideH, strideW, paddingH, paddingW, activation, use_bias, shared_op, kernel_initializer, bias_initializer); layers.push_back(conv); return conv->outputs[0]; } Conv2D* FFModel::conv2d(int inChannels, int outChannels, int kernelH, int kernelW, int strideH, int strideW, int paddingH, int paddingW, ActiMode activation, bool use_bias, Initializer* kernel_initializer, Initializer* bias_initializer) { if (kernel_initializer == NULL) { int seed = std::rand(); kernel_initializer = new GlorotUniform(seed); } if (bias_initializer == NULL) { bias_initializer = new ZeroInitializer(); } Conv2D *conv = new Conv2D(*this, inChannels, outChannels, kernelH, kernelW, strideH, strideW, paddingH, paddingW, activation, use_bias, kernel_initializer, bias_initializer); layers.push_back(conv); return conv; } /* locals[0] = kernel locals[1] = bias */ Conv2D::Conv2D(FFModel& model, const Tensor& _input, int out_dim, int _kernel_h, int _kernel_w, int _stride_h, int _stride_w, int _padding_h, int _padding_w, ActiMode _activation, bool _use_bias, const Op* shared_op, Initializer* _kernel_initializer, Initializer* _bias_initializer) : Op(model, OP_CONV2D, shared_op, "Conv2D_"+std::to_string(_kernel_h)+std::to_string(_kernel_w), _input), in_channels(_input.adim[2]), out_channels(out_dim), kernel_h(_kernel_h), kernel_w(_kernel_w), stride_h(_stride_h), stride_w(_stride_w), padding_h(_padding_h), padding_w(_padding_w), activation(_activation), use_bias(_use_bias), kernel_initializer(_kernel_initializer), bias_initializer(_bias_initializer), profiling(model.config.profiling) { assert(_input.numDim == 4); // Set output shape int input_w = inputs[0].adim[0]; int input_h = inputs[0].adim[1]; int output_w = 1 + (input_w + 2 * padding_w - kernel_w) / stride_w; int output_h = 1 + (input_h + 2 * padding_h - kernel_h) / stride_h; int output_c = out_channels; int output_n = inputs[0].adim[3]; numOutputs = 1; outputs[0].numDim = 4; outputs[0].adim[0] = output_w; outputs[0].adim[1] = output_h; outputs[0].adim[2] = output_c; outputs[0].adim[3] = output_n; weights[0].numDim = 4; weights[0].adim[0] = kernel_w; weights[0].adim[1] = kernel_h; weights[0].adim[2] = in_channels; weights[0].adim[3] = out_channels; numWeights = 1; if (use_bias) { weights[1].numDim = 1; weights[1].adim[0] = out_channels; 
numWeights = 2; } } Conv2D::Conv2D(FFModel& model, int in_dim, int out_dim, int _kernel_h, int _kernel_w, int _stride_h, int _stride_w, int _padding_h, int _padding_w, ActiMode _activation, bool _use_bias, Initializer* _kernel_initializer, Initializer* _bias_initializer) : Op(model, OP_CONV2D, "Conv2D_"+std::to_string(_kernel_h)+std::to_string(_kernel_w), 1), in_channels(in_dim), out_channels(out_dim), kernel_h(_kernel_h), kernel_w(_kernel_w), stride_h(_stride_h), stride_w(_stride_w), padding_h(_padding_h), padding_w(_padding_w), activation(_activation), use_bias(_use_bias), kernel_initializer(_kernel_initializer), bias_initializer(_bias_initializer), profiling(model.config.profiling) { } Tensor Conv2D::init_inout(FFModel& model, const Tensor& _input) { assert(_input.numDim == 4); assert(_input.adim[2] == in_channels); inputs[0] = _input; create_output_and_partition(model); return outputs[0]; } void Conv2D::create_weights(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname)); // Create kernel { const int dims[4] = {out_channels, in_channels, kernel_h, kernel_w}; weights[0] = model.create_conv_weight<4>(this, dims, (IndexSpaceT<4>)task_is, DT_FLOAT, kernel_initializer); } // Create bias tensor if (use_bias) { const int dims[1] = {out_channels}; weights[1] = model.create_conv_weight<1>(this, dims, (IndexSpaceT<4>)task_is, DT_FLOAT, bias_initializer); assert(numWeights == 2); } else { assert(numWeights == 1); } } void Conv2D::create_output_and_partition(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is); // Create output tensor int input_w = inputs[0].adim[0]; int input_h = inputs[0].adim[1]; int output_w = 1 + (input_w + 2 * padding_w - kernel_w) / stride_w; int output_h = 1 + (input_h + 2 * padding_h - kernel_h) / stride_h; int output_c = out_channels; int output_n = inputs[0].adim[3]; int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1; int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1; int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1; { const int dims[4] = {output_n, output_c, output_h, output_w}; outputs[0] = model.create_tensor<4>(dims, (IndexSpaceT<4>)task_is, DT_FLOAT); outputs[0].owner_op = this; outputs[0].owner_idx = 0; } // Compute partition bound for input Rect<4> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); // Currently assume we didn't split across the channel dimension assert(num_par_c == 1); if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { model.create_disjoint_partition( inputs[0], (IndexSpaceT<4>)task_is, input_lps[0], input_grad_lps[0]); } } cudnnConvolutionFwdAlgo_t selectConvolutionForwardAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t yDesc, void* y); cudnnConvolutionBwdFilterAlgo_t selectConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnTensorDescriptor_t dyDesc, const void* dy, const 
cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnFilterDescriptor_t dwDesc, void* dw); cudnnConvolutionBwdDataAlgo_t selectConvolutionBackwardDataAlgorithm(cudnnHandle_t handle, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t dxDesc, void* dx); /* regions[0]: input regions[1]: output regions[2](I): filter regions[3](I): bias regions[4](O): filter_grad regions[5](O): input_grad */ __host__ OpMeta* Conv2D::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 6); assert(task->regions.size() == 6); const Conv2D* conv = (Conv2D*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 4> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_kernel_grad( regions[4], task->regions[4], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorW<float, 4> acc_input_grad( regions[5], task->regions[5], FID_DATA, ctx, runtime, false/*readOutput*/); Conv2DMeta* m = new Conv2DMeta(handle); m->relu = conv->activation == AC_MODE_RELU; int input_w = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int input_h = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1; int input_c = acc_input.rect.hi[2] - acc_input.rect.lo[2] + 1; int input_n = acc_input.rect.hi[3] - acc_input.rect.lo[3] + 1; int output_w = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int output_h = acc_output.rect.hi[1] - acc_output.rect.lo[1] + 1; int output_c = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1; int output_n = acc_output.rect.hi[3] - acc_output.rect.lo[3] + 1; printf("init conv (input): n(%d) c(%d) h(%d) w(%d)\n", input_n, input_c, input_h, input_w); printf("init conv (output): n(%d) c(%d) h(%d) w(%d)\n", output_n, output_c, output_h, output_w); checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_c, 1, 1)); printf("filterDim: kernel(%d %d) c_in(%d), c_out(%d)\n", conv->kernel_h, conv->kernel_w, input_c, output_c); checkCUDNN(cudnnSetFilter4dDescriptor(m->filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, output_c, input_c, conv->kernel_h, conv->kernel_w)); //printf("convDim: padding(%d %d) stride(%d %d)\n", conv->padding_h, conv->padding_w, conv->stride_h, conv->stride_w); int pad_h = ((output_h - 1) * conv->stride_h + conv->kernel_h - input_h + 1) / 2; int pad_w = ((output_w - 1) * conv->stride_w + conv->kernel_w - input_w + 1) / 2; if (pad_h != conv->padding_h) printf("Warning: changing conv_padding_h to satisfy output_h size\n"); if (pad_w != conv->padding_w) printf("Warning: changing conv_padding_w to satisfy output_w size\n"); checkCUDNN(cudnnSetConvolution2dDescriptor(m->convDesc, pad_h,//conv->padding_h, pad_w,//conv->padding_w, conv->stride_h, conv->stride_w, 1/*upscale_x*/, 1/*upscale_y*/, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); // enable tensor core when possible 
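// pad_h/pad_w above are re-derived from the requested output extent
// (pad = ((out - 1)*stride + kernel - in + 1) / 2) so the cuDNN descriptor
// reproduces the output shape chosen in the constructor; a warning is printed
// when this differs from the user-supplied padding.
// CUDNN_TENSOR_OP_MATH below only permits Tensor Core kernels; cuDNN still
// falls back to ordinary FP32 math when the chosen algorithm, data type or
// hardware does not support them.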
checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH)); int n, c, h, w; checkCUDNN(cudnnGetConvolution2dForwardOutputDim(m->convDesc, m->inputTensor, m->filterDesc, &n, &c, &h, &w)); assert(n == output_n); assert(c == output_c); assert(h == output_h); assert(w == output_w); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w)); // select forward algorithm m->fwdAlgo = selectConvolutionForwardAlgorithm(m->handle.dnn, m->inputTensor, acc_input.ptr, m->filterDesc, acc_kernel.ptr, m->convDesc, m->handle.workSpace, m->handle.workSpaceSize, m->outputTensor, acc_output.ptr); // select backward filter algorithm m->bwdFilterAlgo = selectConvolutionBackwardFilterAlgorithm( m->handle.dnn, m->inputTensor, acc_input.ptr, m->outputTensor, acc_output.ptr, m->convDesc, m->handle.workSpace, m->handle.workSpaceSize, m->filterDesc, acc_kernel_grad.ptr); // select backward data algorithm m->bwdDataAlgo = selectConvolutionBackwardDataAlgorithm( m->handle.dnn, m->filterDesc, acc_kernel.ptr, m->outputTensor, acc_output.ptr, m->convDesc, m->handle.workSpace, m->handle.workSpaceSize, m->inputTensor, acc_input_grad.ptr); if (m->relu) { checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); } return m; } void Conv2D::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); ParallelConfig pc; std::string pcname = name; ff.config.find_parallel_config(4, pcname, pc); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { FFHandler handle = ff.handlers[pc.device_ids[idx++]]; argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher launcher(CONV2D_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Conv2D)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[1].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(4, FID_DATA); launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(5, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } void Conv2D::forward_kernel(const Conv2DMeta* m, const float* input_ptr, float* output_ptr, const float* filter_ptr, const float* bias_ptr) const { float alpha = 1.0f, beta = 0.0f; checkCUDNN(cudnnConvolutionForward(m->handle.dnn, &alpha, m->inputTensor, input_ptr, m->filterDesc, filter_ptr, m->convDesc, m->fwdAlgo, m->handle.workSpace, m->handle.workSpaceSize, &beta, 
m->outputTensor, output_ptr)); checkCUDNN(cudnnAddTensor(m->handle.dnn, &alpha, m->biasTensor, bias_ptr, &alpha, m->outputTensor, output_ptr)); if (m->relu) { checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc, &alpha, m->outputTensor, output_ptr, &beta, m->outputTensor, output_ptr)); } } /* regions[0](I): input regions[1](O): output regions[2](I): filter regions[3](I): bias */ __host__ void Conv2D::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); Conv2D* conv = (Conv2D*) task->args; const Conv2DMeta* m = *((Conv2DMeta**) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 4> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); //printf("fwdAlgo(%d), bwdFilterALgo(%d), bwdDataAlgo(%d)\n", (int)m->fwdAlgo,(int) m->bwdFilterAlgo,(int) m->bwdDataAlgo); hipEvent_t t_start, t_end; if (conv->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start); } #ifndef DISABLE_LEGION_CUDA_HIJACK hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); #endif conv->forward_kernel(m, acc_input.ptr, acc_output.ptr, acc_kernel.ptr, acc_bias.ptr); if (conv->profiling) { hipEventRecord(t_end); checkCUDA(hipEventSynchronize(t_end)); //print_tensor<4, float>(acc_input.ptr, acc_input.rect, "[Conv2D:forward:input]"); //print_tensor<4, float>(acc_kernel.ptr, acc_kernel.rect, "[Conv2D:forward:kernel]"); //print_tensor<1, float>(acc_bias.ptr, acc_bias.rect, "[Conv2D:forward:bias]"); //print_tensor<4, float>(acc_output.ptr, acc_output.rect, "[Conv2D:forward:output]"); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("Conv2D forward time (CF) = %.2fms\n", elapsed); } } __host__ void Conv2D::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(CONV2D_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(Conv2D)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); runtime->execute_index_space(ctx, launcher); } void Conv2D::backward_kernel(const Conv2DMeta* m, const float* input_ptr, float* input_grad_ptr, const float* output_ptr, float* 
output_grad_ptr, const float* kernel_ptr, float* kernel_grad_ptr, float* bias_grad_ptr) const { float alpha = 1.0f; //float beta = 0.0f; if (m->relu) { cudnnDataType_t dataType; int n, c, h, w, nStride, cStride, hStride, wStride; checkCUDNN(cudnnGetTensor4dDescriptor(m->outputTensor, &dataType, &n, &c, &h, &w, &nStride, &cStride, &hStride, &wStride)); hipLaunchKernelGGL(( reluBackward), dim3(GET_BLOCKS(n*c*h*w)), dim3(CUDA_NUM_THREADS), 0, 0, output_grad_ptr, output_ptr, n*c*h*w); } // Compute filter gradiant // NOTE: we use alpha for kernel_grad to accumulate gradients checkCUDNN(cudnnConvolutionBackwardFilter(m->handle.dnn, &alpha, m->inputTensor, input_ptr, m->outputTensor, output_grad_ptr, m->convDesc, m->bwdFilterAlgo, m->handle.workSpace, m->handle.workSpaceSize, &alpha, m->filterDesc, kernel_grad_ptr)); // Compute bias gradiant // NOTE: we use alpha for bias_grad to accumulate gradients checkCUDNN(cudnnConvolutionBackwardBias(m->handle.dnn, &alpha, m->outputTensor, output_grad_ptr, &alpha, m->biasTensor, bias_grad_ptr)); // Compute data gradiant // NOTE: we use alpha for input_grad to accumulate gradients checkCUDNN(cudnnConvolutionBackwardData(m->handle.dnn, &alpha, m->filterDesc, kernel_ptr, m->outputTensor, output_grad_ptr, m->convDesc, m->bwdDataAlgo, m->handle.workSpace, m->handle.workSpaceSize, &alpha, m->inputTensor, input_grad_ptr)); } /* regions[0](I): input regions[1](I/O): input_grad regions[2](I): output regions[3](I/O): output_grad regions[4](I): filter regions[5](I/O): filter_grad regions[6](I/O): bias_grad */ __host__ void Conv2D::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 7); assert(task->regions.size() == 7); Conv2D* conv = (Conv2D*) task->args; const Conv2DMeta* m = *((Conv2DMeta**) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_input_grad( regions[1], task->regions[1], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 4> acc_output( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output_grad( regions[3], task->regions[3], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 4> acc_kernel( regions[4], task->regions[4], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_kernel_grad( regions[5], task->regions[5], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorW<float, 1> acc_bias_grad( regions[6], task->regions[6], FID_DATA, ctx, runtime, true/*readOutput*/); hipEvent_t t_start, t_end; if (conv->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start); } #ifndef DISABLE_LEGION_CUDA_HIJACK hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); #endif conv->backward_kernel(m, acc_input.ptr, acc_input_grad.ptr, acc_output.ptr, acc_output_grad.ptr, acc_kernel.ptr, acc_kernel_grad.ptr, acc_bias_grad.ptr); if (conv->profiling) { hipEventRecord(t_end); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("Conv2D backward time = %.2fms\n", elapsed); //print_tensor<4, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Conv2D:backward:output_grad]"); //print_tensor<4, float>(acc_kernel_grad.ptr, acc_kernel_grad.rect, "[Conv2D:backward:kernel_grad]"); //print_tensor<1, float>(acc_bias_grad.ptr, 
acc_bias_grad.rect, "[Conv2D:backward:bias_grad]"); //print_tensor<4, float>(acc_input_grad.ptr, acc_input_grad.rect, "[Conv2D:backward:input_grad]"); } } __host__ void Conv2D::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(CONV2D_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(Conv2D)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1](I/O): input_grad launcher.add_region_requirement( RegionRequirement(inputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(1, FID_DATA); // regions[2](I): output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(4, FID_DATA); // regions[5](I/O): filter_grad launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(5, FID_DATA); // regions[6](I/O): bias_grad launcher.add_region_requirement( RegionRequirement(weights[1].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[1].region_grad)); launcher.add_field(6, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); // TODO: remove this line //if (first_layer) //fm.wait_all_results(); } #ifdef DEADCODE /* regions[0](I/O): filter regions[1](I): filter_grad regions[2](I/O): bias regions[3](I): bias_grad */ __host__ void Conv2D::update_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); const Conv2D* conv = (Conv2D*) task->args; const AccessorRW<float, 1> acc_filter(regions[0], FID_DATA); const AccessorRO<float, 1> acc_filter_grad(regions[1], FID_DATA); const AccessorRW<float, 1> acc_bias(regions[2], FID_DATA); const AccessorRO<float, 1> acc_bias_grad(regions[3], FID_DATA); Rect<1> rect_filter, rect_filter_grad, rect_bias, rect_bias_grad; rect_filter = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_filter_grad = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); rect_bias = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); rect_bias_grad = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space()); size_t filter_size = rect_filter.volume(); size_t bias_size = rect_bias.volume(); assert(filter_size == conv->in_channels * conv->out_channels * conv->kernel_w * conv->kernel_h); assert(bias_size == conv->out_channels); assert(filter_size * conv->num_replica == 
rect_filter_grad.volume()); assert(bias_size * conv->num_replica == rect_bias_grad.volume()); assert(acc_filter.accessor.is_dense_arbitrary(rect_filter)); assert(acc_filter_grad.accessor.is_dense_arbitrary(rect_filter_grad)); assert(acc_bias.accessor.is_dense_arbitrary(rect_bias)); assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad)); float *filter_ptr = acc_filter.ptr(rect_filter.lo); const float *filter_grad_ptr = acc_filter_grad.ptr(rect_filter_grad.lo); float *bias_ptr = acc_bias.ptr(rect_bias.lo); const float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo); updateGAS(filter_ptr, filter_grad_ptr, filter_size, conv->num_replica, conv->learning_rate); updateGAS(bias_ptr, bias_grad_ptr, bias_size, conv->num_replica, conv->learning_rate); } __host__ void Conv2D::update(const FFModel& ff) { // Synchronize the learning rate learning_rate = ff.config.learningRate; assert(num_replica > 0); // Only aggregate parameters if more than one replica if (num_replica > 1) { Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; TaskLauncher launcher(CONV2D_UPD_TASK_ID, TaskArgument(this, sizeof(Conv2D))); launcher.add_region_requirement( RegionRequirement(locals[0].region, READ_WRITE, EXCLUSIVE, locals[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[0].region_grad, READ_ONLY, EXCLUSIVE, locals[0].region_grad)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[1].region, READ_WRITE, EXCLUSIVE, locals[1].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[1].region_grad, READ_ONLY, EXCLUSIVE, locals[1].region_grad)); launcher.add_field(3, FID_DATA); runtime->execute_task(ctx, launcher); } } #endif /* __host__ Parameter* Conv2D::get_parameter(int index) { if (index == 0) { return &weights[0]; } else if (index == 1) { return &weights[1]; } else { assert(0); return NULL; } }*/ __host__ void Conv2D::print_layer(const FFModel& ff) { printf("conv2d layer\n"); Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; #if 0 TaskLauncher launcher(CONV2D_PRINT_TASK_ID, TaskArgument(NULL, 0)); launcher.add_region_requirement( RegionRequirement(kernel.region, READ_ONLY, EXCLUSIVE, kernel.region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(bias.region, READ_ONLY, EXCLUSIVE, bias.region)); launcher.add_field(1, FID_DATA); Future fu = runtime->execute_task(ctx, launcher); fu.wait(); #else RegionRequirement kernel_req(weights[0].region, READ_WRITE, EXCLUSIVE, weights[0].region); kernel_req.add_field(FID_DATA); InlineLauncher kernel_launcher(kernel_req); PhysicalRegion kernel_region = runtime->map_region(ctx, kernel_launcher); kernel_region.wait_until_valid(); /* RegionRequirement kernel_grad_req(kernel.region_grad, READ_WRITE, EXCLUSIVE, kernel.region_grad); kernel_grad_req.add_field(FID_DATA); InlineLauncher kernel_grad_launcher(kernel_grad_req); PhysicalRegion kernel_grad_region = runtime->map_region(ctx, kernel_grad_launcher); kernel_grad_region.wait_until_valid(); */ RegionRequirement bias_req(weights[1].region, READ_WRITE, EXCLUSIVE, weights[1].region); bias_req.add_field(FID_DATA); InlineLauncher bias_launcher(bias_req); PhysicalRegion bias_region = runtime->map_region(ctx, bias_launcher); bias_region.wait_until_valid(); /* RegionRequirement bias_grad_req(bias.region_grad, READ_WRITE, EXCLUSIVE, bias.region_grad); bias_grad_req.add_field(FID_DATA); InlineLauncher 
bias_grad_launcher(bias_grad_req); PhysicalRegion bias_grad_region = runtime->map_region(ctx, bias_grad_launcher); bias_grad_region.wait_until_valid(); */ TensorAccessorW<float, 4> acc_kernel(kernel_region, kernel_req, FID_DATA, ctx, runtime, true); // const AccessorRW<float, 1> acc_kernel_grad(kernel_grad_region, FID_DATA); TensorAccessorW<float, 1> acc_bias(bias_region, bias_req, FID_DATA, ctx, runtime, true); //const AccessorRW<float, 1> acc_bias_grad(bias_grad_region, FID_DATA); const float *kernel_ptr = acc_kernel.ptr; //float *kernel_grad_ptr = acc_kernel_grad.ptr; const float *bias_ptr = acc_bias.ptr; //float *bias_grad_ptr = acc_bias_grad.ptr; size_t kernel_size = acc_kernel.rect.volume(); int kernel_dim1 = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1; int kernel_dim2 = acc_kernel.rect.hi[1] - acc_kernel.rect.lo[1] + 1; int kernel_dim3 = acc_kernel.rect.hi[2] - acc_kernel.rect.lo[2] + 1; int kernel_dim4 = acc_kernel.rect.hi[3] - acc_kernel.rect.lo[3] + 1; //size_t kernel_grad_size = rect_kernel_grad.volume(); size_t bias_size = acc_bias.rect.volume(); //size_t bias_grad_size = rect_bias_grad.volume(); printf("kernel, %p, %d, [%d, %d, %d, %d]\n", kernel_ptr, kernel_size, kernel_dim1, kernel_dim2, kernel_dim3, kernel_dim4); //printf("kernel_grad, %d\n", kernel_grad_size); printf("bias, %p, %d\n", bias_ptr, bias_size); //printf("bias_grad, %d\n", bias_grad_size); for (int i = 0; i < bias_size; i++) { printf("%f ", bias_ptr[i]); } printf("\n"); /* for (int i = 0; i < bias_grad_size; i++) { printf("%f ", bias_grad_ptr); bias_grad_ptr ++; } printf("\n");*/ for (int i = 0; i < kernel_size; i++) { printf("%f ", kernel_ptr[i]); } printf("\n"); /* for (int i = 0; i < kernel_grad_size; i++) { printf("%f ", kernel_grad_ptr); kernel_grad_ptr ++; } printf("\n"); */ runtime->unmap_region(ctx, kernel_region); // runtime->unmap_region(ctx, kernel_grad_region); runtime->unmap_region(ctx, bias_region); // runtime->unmap_region(ctx, bias_grad_region); #endif } cudnnConvolutionFwdAlgo_t selectConvolutionForwardAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t yDesc, void* y) { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx( handle, xDesc, x, wDesc, w, convDesc, yDesc, y, reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); printf("forwardAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time); return perfResults[0].algo; } cudnnConvolutionBwdFilterAlgo_t selectConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnFilterDescriptor_t dwDesc, void* dw) { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdFilterAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithmEx( handle, xDesc, x, dyDesc, dy, convDesc, dwDesc, dw, reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); printf("bwdFilterAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time); return perfResults[0].algo; } cudnnConvolutionBwdDataAlgo_t 
selectConvolutionBackwardDataAlgorithm(cudnnHandle_t handle, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t dxDesc, void* dx) { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdDataAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithmEx( handle, wDesc, w, dyDesc, dy, convDesc, dxDesc, dx, reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); printf("bwdDataAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time); return perfResults[0].algo; } Conv2DMeta::Conv2DMeta(FFHandler handler) : OpMeta(handler) { checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc)); checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); } bool Conv2D::measure_compute_time(Simulator* sim, const ParallelConfig& pc, float& forward_time, float& backward_time) { Tensor sub_output, sub_input; if(!outputs[0].get_output_sub_tensor(pc, sub_output, OP_CONV2D)) return false; if(!inputs[0].get_input_sub_tensor(pc, sub_input, OP_CONV2D)) return false; int input_w = sub_input.adim[0]; int input_h = sub_input.adim[1]; int input_c = sub_input.adim[2]; int input_n = sub_input.adim[3]; int output_w = sub_output.adim[0]; int output_h = sub_output.adim[1]; int output_c = sub_output.adim[2]; int output_n = sub_output.adim[3]; int pad_h = ((output_h - 1) * stride_h + kernel_h - input_h + 1) / 2; int pad_w = ((output_w - 1) * stride_w + kernel_w - input_w + 1) / 2; Conv2DMeta* m = sim->conv2d_meta; m->relu = activation == AC_MODE_RELU; checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_c, 1, 1)); checkCUDNN(cudnnSetFilter4dDescriptor(m->filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, output_c, input_c, kernel_h, kernel_w)); checkCUDNN(cudnnSetConvolution2dDescriptor(m->convDesc, pad_h, pad_w, stride_h, stride_w, 1/*dilationH*/, 1/*dilationW*/, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH)); int n, c, h, w; checkCUDNN(cudnnGetConvolution2dForwardOutputDim(m->convDesc, m->inputTensor, m->filterDesc, &n, &c, &h, &w)); assert(n == output_n); assert(c == output_c); assert(h == output_h); assert(w == output_w); checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0.0)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w)); // allocate tensors in simulator sim->free_all(); float* input_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert(input_ptr != NULL); float *output_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert(output_ptr != NULL); float* weight_ptr = (float*)sim->allocate((size_t)output_c * input_c * kernel_h * kernel_w, DT_FLOAT); assert(weight_ptr != NULL); float* bias_ptr = (float*)sim->allocate(output_c, DT_FLOAT); assert(bias_ptr != NULL); // select forward algorithm { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionFwdAlgoPerf_t 
perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx( m->handle.dnn, m->inputTensor, input_ptr, m->filterDesc, weight_ptr, m->convDesc, m->outputTensor, output_ptr, reqAlgCnt, &cnt, perfResults, m->handle.workSpace, m->handle.workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); forward_time = perfResults[0].time; //for (int i = 0; i < cnt; i++) // printf("conv forward: algo(%d) time(%.4lf)\n", perfResults[i].algo, perfResults[i].time); } // select forward algorithm { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdFilterAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithmEx( m->handle.dnn, m->inputTensor, input_ptr, m->outputTensor, output_ptr, m->convDesc, m->filterDesc, weight_ptr, reqAlgCnt, &cnt, perfResults, m->handle.workSpace, m->handle.workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); backward_time = perfResults[0].time; } { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdDataAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithmEx( m->handle.dnn, m->filterDesc, weight_ptr, m->outputTensor, output_ptr, m->convDesc, m->inputTensor, input_ptr, reqAlgCnt, &cnt, perfResults, m->handle.workSpace, m->handle.workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); backward_time += perfResults[0].time; } printf("[Measure Conv2D] input(%d %d %d %d) output(%d %d %d %d) forward_time(%.4lf) backward_time(%.4lf)\n", input_n, input_c, input_h, input_w, output_n, output_c, output_h, output_w, forward_time, backward_time); return true; }
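// Illustrative usage sketch of the conv2d API defined in this file, assuming the
// trailing shared_op / initializer arguments of FFModel::conv2d default to NULL in
// model.h (in which case a GlorotUniform kernel initializer and a ZeroInitializer
// bias initializer are created above). Given an FFModel `ff` and a 4-D NCHW Tensor
// `x` built elsewhere:
//
//   Tensor y = ff.conv2d(x, 64 /*outChannels*/,
//                        7, 7,             // kernelH, kernelW
//                        2, 2,             // strideH, strideW
//                        3, 3,             // paddingH, paddingW
//                        AC_MODE_RELU, true /*use_bias*/);
//
// For a 224x224 spatial input this yields a 64x112x112 output per sample,
// following output = 1 + (input + 2*padding - kernel) / stride.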
5509749d3d79a34621b3bacff377322c3eff48e8.cu
/* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::conv2d(const Tensor& input, int outChannels, int kernelH, int kernelW, int strideH, int strideW, int paddingH, int paddingW, ActiMode activation, bool use_bias, const Op* shared_op, Initializer* kernel_initializer, Initializer* bias_initializer) { if (kernel_initializer == NULL) { int seed = std::rand(); kernel_initializer = new GlorotUniform(seed); } if (bias_initializer == NULL) { bias_initializer = new ZeroInitializer(); } assert(input.numDim == 4); /*NCHW*/ Conv2D *conv = new Conv2D(*this, input, outChannels, kernelH, kernelW, strideH, strideW, paddingH, paddingW, activation, use_bias, shared_op, kernel_initializer, bias_initializer); layers.push_back(conv); return conv->outputs[0]; } Conv2D* FFModel::conv2d(int inChannels, int outChannels, int kernelH, int kernelW, int strideH, int strideW, int paddingH, int paddingW, ActiMode activation, bool use_bias, Initializer* kernel_initializer, Initializer* bias_initializer) { if (kernel_initializer == NULL) { int seed = std::rand(); kernel_initializer = new GlorotUniform(seed); } if (bias_initializer == NULL) { bias_initializer = new ZeroInitializer(); } Conv2D *conv = new Conv2D(*this, inChannels, outChannels, kernelH, kernelW, strideH, strideW, paddingH, paddingW, activation, use_bias, kernel_initializer, bias_initializer); layers.push_back(conv); return conv; } /* locals[0] = kernel locals[1] = bias */ Conv2D::Conv2D(FFModel& model, const Tensor& _input, int out_dim, int _kernel_h, int _kernel_w, int _stride_h, int _stride_w, int _padding_h, int _padding_w, ActiMode _activation, bool _use_bias, const Op* shared_op, Initializer* _kernel_initializer, Initializer* _bias_initializer) : Op(model, OP_CONV2D, shared_op, "Conv2D_"+std::to_string(_kernel_h)+std::to_string(_kernel_w), _input), in_channels(_input.adim[2]), out_channels(out_dim), kernel_h(_kernel_h), kernel_w(_kernel_w), stride_h(_stride_h), stride_w(_stride_w), padding_h(_padding_h), padding_w(_padding_w), activation(_activation), use_bias(_use_bias), kernel_initializer(_kernel_initializer), bias_initializer(_bias_initializer), profiling(model.config.profiling) { assert(_input.numDim == 4); // Set output shape int input_w = inputs[0].adim[0]; int input_h = inputs[0].adim[1]; int output_w = 1 + (input_w + 2 * padding_w - kernel_w) / stride_w; int output_h = 1 + (input_h + 2 * padding_h - kernel_h) / stride_h; int output_c = out_channels; int output_n = inputs[0].adim[3]; numOutputs = 1; outputs[0].numDim = 4; outputs[0].adim[0] = output_w; outputs[0].adim[1] = output_h; outputs[0].adim[2] = output_c; outputs[0].adim[3] = output_n; weights[0].numDim = 4; weights[0].adim[0] = kernel_w; weights[0].adim[1] = kernel_h; weights[0].adim[2] = in_channels; weights[0].adim[3] = out_channels; numWeights = 1; if (use_bias) { weights[1].numDim = 1; weights[1].adim[0] = out_channels; numWeights = 2; } } Conv2D::Conv2D(FFModel& model, int in_dim, int out_dim, int _kernel_h, int 
_kernel_w, int _stride_h, int _stride_w, int _padding_h, int _padding_w, ActiMode _activation, bool _use_bias, Initializer* _kernel_initializer, Initializer* _bias_initializer) : Op(model, OP_CONV2D, "Conv2D_"+std::to_string(_kernel_h)+std::to_string(_kernel_w), 1), in_channels(in_dim), out_channels(out_dim), kernel_h(_kernel_h), kernel_w(_kernel_w), stride_h(_stride_h), stride_w(_stride_w), padding_h(_padding_h), padding_w(_padding_w), activation(_activation), use_bias(_use_bias), kernel_initializer(_kernel_initializer), bias_initializer(_bias_initializer), profiling(model.config.profiling) { } Tensor Conv2D::init_inout(FFModel& model, const Tensor& _input) { assert(_input.numDim == 4); assert(_input.adim[2] == in_channels); inputs[0] = _input; create_output_and_partition(model); return outputs[0]; } void Conv2D::create_weights(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname)); // Create kernel { const int dims[4] = {out_channels, in_channels, kernel_h, kernel_w}; weights[0] = model.create_conv_weight<4>(this, dims, (IndexSpaceT<4>)task_is, DT_FLOAT, kernel_initializer); } // Create bias tensor if (use_bias) { const int dims[1] = {out_channels}; weights[1] = model.create_conv_weight<1>(this, dims, (IndexSpaceT<4>)task_is, DT_FLOAT, bias_initializer); assert(numWeights == 2); } else { assert(numWeights == 1); } } void Conv2D::create_output_and_partition(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is); // Create output tensor int input_w = inputs[0].adim[0]; int input_h = inputs[0].adim[1]; int output_w = 1 + (input_w + 2 * padding_w - kernel_w) / stride_w; int output_h = 1 + (input_h + 2 * padding_h - kernel_h) / stride_h; int output_c = out_channels; int output_n = inputs[0].adim[3]; int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1; int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1; int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1; { const int dims[4] = {output_n, output_c, output_h, output_w}; outputs[0] = model.create_tensor<4>(dims, (IndexSpaceT<4>)task_is, DT_FLOAT); outputs[0].owner_op = this; outputs[0].owner_idx = 0; } // Compute partition bound for input Rect<4> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); // Currently assume we didn't split across the channel dimension assert(num_par_c == 1); if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { model.create_disjoint_partition( inputs[0], (IndexSpaceT<4>)task_is, input_lps[0], input_grad_lps[0]); } } cudnnConvolutionFwdAlgo_t selectConvolutionForwardAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t yDesc, void* y); cudnnConvolutionBwdFilterAlgo_t selectConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const 
cudnnFilterDescriptor_t dwDesc, void* dw); cudnnConvolutionBwdDataAlgo_t selectConvolutionBackwardDataAlgorithm(cudnnHandle_t handle, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t dxDesc, void* dx); /* regions[0]: input regions[1]: output regions[2](I): filter regions[3](I): bias regions[4](O): filter_grad regions[5](O): input_grad */ __host__ OpMeta* Conv2D::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 6); assert(task->regions.size() == 6); const Conv2D* conv = (Conv2D*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 4> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_kernel_grad( regions[4], task->regions[4], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorW<float, 4> acc_input_grad( regions[5], task->regions[5], FID_DATA, ctx, runtime, false/*readOutput*/); Conv2DMeta* m = new Conv2DMeta(handle); m->relu = conv->activation == AC_MODE_RELU; int input_w = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int input_h = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1; int input_c = acc_input.rect.hi[2] - acc_input.rect.lo[2] + 1; int input_n = acc_input.rect.hi[3] - acc_input.rect.lo[3] + 1; int output_w = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int output_h = acc_output.rect.hi[1] - acc_output.rect.lo[1] + 1; int output_c = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1; int output_n = acc_output.rect.hi[3] - acc_output.rect.lo[3] + 1; printf("init conv (input): n(%d) c(%d) h(%d) w(%d)\n", input_n, input_c, input_h, input_w); printf("init conv (output): n(%d) c(%d) h(%d) w(%d)\n", output_n, output_c, output_h, output_w); checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_c, 1, 1)); printf("filterDim: kernel(%d %d) c_in(%d), c_out(%d)\n", conv->kernel_h, conv->kernel_w, input_c, output_c); checkCUDNN(cudnnSetFilter4dDescriptor(m->filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, output_c, input_c, conv->kernel_h, conv->kernel_w)); //printf("convDim: padding(%d %d) stride(%d %d)\n", conv->padding_h, conv->padding_w, conv->stride_h, conv->stride_w); int pad_h = ((output_h - 1) * conv->stride_h + conv->kernel_h - input_h + 1) / 2; int pad_w = ((output_w - 1) * conv->stride_w + conv->kernel_w - input_w + 1) / 2; if (pad_h != conv->padding_h) printf("Warning: changing conv_padding_h to satisfy output_h size\n"); if (pad_w != conv->padding_w) printf("Warning: changing conv_padding_w to satisfy output_w size\n"); checkCUDNN(cudnnSetConvolution2dDescriptor(m->convDesc, pad_h,//conv->padding_h, pad_w,//conv->padding_w, conv->stride_h, conv->stride_w, 1/*upscale_x*/, 1/*upscale_y*/, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); // enable tensor core when possible checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH)); int n, c, h, w; 
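// The call below asks cuDNN for the output shape implied by the input, filter and
// convolution descriptors; the asserts that follow cross-check it against the shape
// derived in create_output_and_partition, so a padding/stride mismatch is caught at
// init time rather than during forward/backward.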
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(m->convDesc, m->inputTensor, m->filterDesc, &n, &c, &h, &w)); assert(n == output_n); assert(c == output_c); assert(h == output_h); assert(w == output_w); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w)); // select forward algorithm m->fwdAlgo = selectConvolutionForwardAlgorithm(m->handle.dnn, m->inputTensor, acc_input.ptr, m->filterDesc, acc_kernel.ptr, m->convDesc, m->handle.workSpace, m->handle.workSpaceSize, m->outputTensor, acc_output.ptr); // select backward filter algorithm m->bwdFilterAlgo = selectConvolutionBackwardFilterAlgorithm( m->handle.dnn, m->inputTensor, acc_input.ptr, m->outputTensor, acc_output.ptr, m->convDesc, m->handle.workSpace, m->handle.workSpaceSize, m->filterDesc, acc_kernel_grad.ptr); // select backward data algorithm m->bwdDataAlgo = selectConvolutionBackwardDataAlgorithm( m->handle.dnn, m->filterDesc, acc_kernel.ptr, m->outputTensor, acc_output.ptr, m->convDesc, m->handle.workSpace, m->handle.workSpaceSize, m->inputTensor, acc_input_grad.ptr); if (m->relu) { checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); } return m; } void Conv2D::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); ParallelConfig pc; std::string pcname = name; ff.config.find_parallel_config(4, pcname, pc); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { FFHandler handle = ff.handlers[pc.device_ids[idx++]]; argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher launcher(CONV2D_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Conv2D)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[1].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(4, FID_DATA); launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(5, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } void Conv2D::forward_kernel(const Conv2DMeta* m, const float* input_ptr, float* output_ptr, const float* filter_ptr, const float* bias_ptr) const { float alpha = 1.0f, beta = 0.0f; checkCUDNN(cudnnConvolutionForward(m->handle.dnn, &alpha, m->inputTensor, input_ptr, m->filterDesc, filter_ptr, m->convDesc, m->fwdAlgo, m->handle.workSpace, m->handle.workSpaceSize, &beta, m->outputTensor, output_ptr)); checkCUDNN(cudnnAddTensor(m->handle.dnn, &alpha, m->biasTensor, bias_ptr, 
&alpha, m->outputTensor, output_ptr)); if (m->relu) { checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc, &alpha, m->outputTensor, output_ptr, &beta, m->outputTensor, output_ptr)); } } /* regions[0](I): input regions[1](O): output regions[2](I): filter regions[3](I): bias */ __host__ void Conv2D::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); Conv2D* conv = (Conv2D*) task->args; const Conv2DMeta* m = *((Conv2DMeta**) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 4> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); //printf("fwdAlgo(%d), bwdFilterALgo(%d), bwdDataAlgo(%d)\n", (int)m->fwdAlgo,(int) m->bwdFilterAlgo,(int) m->bwdDataAlgo); cudaEvent_t t_start, t_end; if (conv->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } #ifndef DISABLE_LEGION_CUDA_HIJACK cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); #endif conv->forward_kernel(m, acc_input.ptr, acc_output.ptr, acc_kernel.ptr, acc_bias.ptr); if (conv->profiling) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); //print_tensor<4, float>(acc_input.ptr, acc_input.rect, "[Conv2D:forward:input]"); //print_tensor<4, float>(acc_kernel.ptr, acc_kernel.rect, "[Conv2D:forward:kernel]"); //print_tensor<1, float>(acc_bias.ptr, acc_bias.rect, "[Conv2D:forward:bias]"); //print_tensor<4, float>(acc_output.ptr, acc_output.rect, "[Conv2D:forward:output]"); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("Conv2D forward time (CF) = %.2fms\n", elapsed); } } __host__ void Conv2D::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(CONV2D_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(Conv2D)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); runtime->execute_index_space(ctx, launcher); } void Conv2D::backward_kernel(const Conv2DMeta* m, const float* input_ptr, float* input_grad_ptr, const float* output_ptr, float* output_grad_ptr, const float* kernel_ptr, float* kernel_grad_ptr, float* bias_grad_ptr) const { float alpha 
= 1.0f; //float beta = 0.0f; if (m->relu) { cudnnDataType_t dataType; int n, c, h, w, nStride, cStride, hStride, wStride; checkCUDNN(cudnnGetTensor4dDescriptor(m->outputTensor, &dataType, &n, &c, &h, &w, &nStride, &cStride, &hStride, &wStride)); reluBackward<<<GET_BLOCKS(n*c*h*w), CUDA_NUM_THREADS>>>(output_grad_ptr, output_ptr, n*c*h*w); } // Compute filter gradiant // NOTE: we use alpha for kernel_grad to accumulate gradients checkCUDNN(cudnnConvolutionBackwardFilter(m->handle.dnn, &alpha, m->inputTensor, input_ptr, m->outputTensor, output_grad_ptr, m->convDesc, m->bwdFilterAlgo, m->handle.workSpace, m->handle.workSpaceSize, &alpha, m->filterDesc, kernel_grad_ptr)); // Compute bias gradiant // NOTE: we use alpha for bias_grad to accumulate gradients checkCUDNN(cudnnConvolutionBackwardBias(m->handle.dnn, &alpha, m->outputTensor, output_grad_ptr, &alpha, m->biasTensor, bias_grad_ptr)); // Compute data gradiant // NOTE: we use alpha for input_grad to accumulate gradients checkCUDNN(cudnnConvolutionBackwardData(m->handle.dnn, &alpha, m->filterDesc, kernel_ptr, m->outputTensor, output_grad_ptr, m->convDesc, m->bwdDataAlgo, m->handle.workSpace, m->handle.workSpaceSize, &alpha, m->inputTensor, input_grad_ptr)); } /* regions[0](I): input regions[1](I/O): input_grad regions[2](I): output regions[3](I/O): output_grad regions[4](I): filter regions[5](I/O): filter_grad regions[6](I/O): bias_grad */ __host__ void Conv2D::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 7); assert(task->regions.size() == 7); Conv2D* conv = (Conv2D*) task->args; const Conv2DMeta* m = *((Conv2DMeta**) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_input_grad( regions[1], task->regions[1], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 4> acc_output( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output_grad( regions[3], task->regions[3], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 4> acc_kernel( regions[4], task->regions[4], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_kernel_grad( regions[5], task->regions[5], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorW<float, 1> acc_bias_grad( regions[6], task->regions[6], FID_DATA, ctx, runtime, true/*readOutput*/); cudaEvent_t t_start, t_end; if (conv->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } #ifndef DISABLE_LEGION_CUDA_HIJACK cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); #endif conv->backward_kernel(m, acc_input.ptr, acc_input_grad.ptr, acc_output.ptr, acc_output_grad.ptr, acc_kernel.ptr, acc_kernel_grad.ptr, acc_bias_grad.ptr); if (conv->profiling) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("Conv2D backward time = %.2fms\n", elapsed); //print_tensor<4, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Conv2D:backward:output_grad]"); //print_tensor<4, float>(acc_kernel_grad.ptr, acc_kernel_grad.rect, "[Conv2D:backward:kernel_grad]"); //print_tensor<1, float>(acc_bias_grad.ptr, acc_bias_grad.rect, "[Conv2D:backward:bias_grad]"); //print_tensor<4, float>(acc_input_grad.ptr, acc_input_grad.rect, "[Conv2D:backward:input_grad]"); 
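// Timing note: the cudaEvent pair above brackets the whole backward_kernel call
// (ReLU backward when enabled, plus the filter, bias and data gradient convolutions,
// all of which accumulate into the *_grad buffers via alpha = 1);
// cudaEventElapsedTime reports milliseconds, matching the "%.2fms" format used above.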
} } __host__ void Conv2D::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(CONV2D_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(Conv2D)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1](I/O): input_grad launcher.add_region_requirement( RegionRequirement(inputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(1, FID_DATA); // regions[2](I): output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(4, FID_DATA); // regions[5](I/O): filter_grad launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(5, FID_DATA); // regions[6](I/O): bias_grad launcher.add_region_requirement( RegionRequirement(weights[1].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[1].region_grad)); launcher.add_field(6, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); // TODO: remove this line //if (first_layer) //fm.wait_all_results(); } #ifdef DEADCODE /* regions[0](I/O): filter regions[1](I): filter_grad regions[2](I/O): bias regions[3](I): bias_grad */ __host__ void Conv2D::update_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); const Conv2D* conv = (Conv2D*) task->args; const AccessorRW<float, 1> acc_filter(regions[0], FID_DATA); const AccessorRO<float, 1> acc_filter_grad(regions[1], FID_DATA); const AccessorRW<float, 1> acc_bias(regions[2], FID_DATA); const AccessorRO<float, 1> acc_bias_grad(regions[3], FID_DATA); Rect<1> rect_filter, rect_filter_grad, rect_bias, rect_bias_grad; rect_filter = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_filter_grad = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); rect_bias = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); rect_bias_grad = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space()); size_t filter_size = rect_filter.volume(); size_t bias_size = rect_bias.volume(); assert(filter_size == conv->in_channels * conv->out_channels * conv->kernel_w * conv->kernel_h); assert(bias_size == conv->out_channels); assert(filter_size * conv->num_replica == rect_filter_grad.volume()); assert(bias_size * conv->num_replica == rect_bias_grad.volume()); assert(acc_filter.accessor.is_dense_arbitrary(rect_filter)); 
assert(acc_filter_grad.accessor.is_dense_arbitrary(rect_filter_grad)); assert(acc_bias.accessor.is_dense_arbitrary(rect_bias)); assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad)); float *filter_ptr = acc_filter.ptr(rect_filter.lo); const float *filter_grad_ptr = acc_filter_grad.ptr(rect_filter_grad.lo); float *bias_ptr = acc_bias.ptr(rect_bias.lo); const float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo); updateGAS(filter_ptr, filter_grad_ptr, filter_size, conv->num_replica, conv->learning_rate); updateGAS(bias_ptr, bias_grad_ptr, bias_size, conv->num_replica, conv->learning_rate); } __host__ void Conv2D::update(const FFModel& ff) { // Synchronize the learning rate learning_rate = ff.config.learningRate; assert(num_replica > 0); // Only aggregate parameters if more than one replica if (num_replica > 1) { Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; TaskLauncher launcher(CONV2D_UPD_TASK_ID, TaskArgument(this, sizeof(Conv2D))); launcher.add_region_requirement( RegionRequirement(locals[0].region, READ_WRITE, EXCLUSIVE, locals[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[0].region_grad, READ_ONLY, EXCLUSIVE, locals[0].region_grad)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[1].region, READ_WRITE, EXCLUSIVE, locals[1].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[1].region_grad, READ_ONLY, EXCLUSIVE, locals[1].region_grad)); launcher.add_field(3, FID_DATA); runtime->execute_task(ctx, launcher); } } #endif /* __host__ Parameter* Conv2D::get_parameter(int index) { if (index == 0) { return &weights[0]; } else if (index == 1) { return &weights[1]; } else { assert(0); return NULL; } }*/ __host__ void Conv2D::print_layer(const FFModel& ff) { printf("conv2d layer\n"); Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; #if 0 TaskLauncher launcher(CONV2D_PRINT_TASK_ID, TaskArgument(NULL, 0)); launcher.add_region_requirement( RegionRequirement(kernel.region, READ_ONLY, EXCLUSIVE, kernel.region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(bias.region, READ_ONLY, EXCLUSIVE, bias.region)); launcher.add_field(1, FID_DATA); Future fu = runtime->execute_task(ctx, launcher); fu.wait(); #else RegionRequirement kernel_req(weights[0].region, READ_WRITE, EXCLUSIVE, weights[0].region); kernel_req.add_field(FID_DATA); InlineLauncher kernel_launcher(kernel_req); PhysicalRegion kernel_region = runtime->map_region(ctx, kernel_launcher); kernel_region.wait_until_valid(); /* RegionRequirement kernel_grad_req(kernel.region_grad, READ_WRITE, EXCLUSIVE, kernel.region_grad); kernel_grad_req.add_field(FID_DATA); InlineLauncher kernel_grad_launcher(kernel_grad_req); PhysicalRegion kernel_grad_region = runtime->map_region(ctx, kernel_grad_launcher); kernel_grad_region.wait_until_valid(); */ RegionRequirement bias_req(weights[1].region, READ_WRITE, EXCLUSIVE, weights[1].region); bias_req.add_field(FID_DATA); InlineLauncher bias_launcher(bias_req); PhysicalRegion bias_region = runtime->map_region(ctx, bias_launcher); bias_region.wait_until_valid(); /* RegionRequirement bias_grad_req(bias.region_grad, READ_WRITE, EXCLUSIVE, bias.region_grad); bias_grad_req.add_field(FID_DATA); InlineLauncher bias_grad_launcher(bias_grad_req); PhysicalRegion bias_grad_region = runtime->map_region(ctx, bias_grad_launcher); bias_grad_region.wait_until_valid(); */ TensorAccessorW<float, 
4> acc_kernel(kernel_region, kernel_req, FID_DATA, ctx, runtime, true); // const AccessorRW<float, 1> acc_kernel_grad(kernel_grad_region, FID_DATA); TensorAccessorW<float, 1> acc_bias(bias_region, bias_req, FID_DATA, ctx, runtime, true); //const AccessorRW<float, 1> acc_bias_grad(bias_grad_region, FID_DATA); const float *kernel_ptr = acc_kernel.ptr; //float *kernel_grad_ptr = acc_kernel_grad.ptr; const float *bias_ptr = acc_bias.ptr; //float *bias_grad_ptr = acc_bias_grad.ptr; size_t kernel_size = acc_kernel.rect.volume(); int kernel_dim1 = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1; int kernel_dim2 = acc_kernel.rect.hi[1] - acc_kernel.rect.lo[1] + 1; int kernel_dim3 = acc_kernel.rect.hi[2] - acc_kernel.rect.lo[2] + 1; int kernel_dim4 = acc_kernel.rect.hi[3] - acc_kernel.rect.lo[3] + 1; //size_t kernel_grad_size = rect_kernel_grad.volume(); size_t bias_size = acc_bias.rect.volume(); //size_t bias_grad_size = rect_bias_grad.volume(); printf("kernel, %p, %d, [%d, %d, %d, %d]\n", kernel_ptr, kernel_size, kernel_dim1, kernel_dim2, kernel_dim3, kernel_dim4); //printf("kernel_grad, %d\n", kernel_grad_size); printf("bias, %p, %d\n", bias_ptr, bias_size); //printf("bias_grad, %d\n", bias_grad_size); for (int i = 0; i < bias_size; i++) { printf("%f ", bias_ptr[i]); } printf("\n"); /* for (int i = 0; i < bias_grad_size; i++) { printf("%f ", bias_grad_ptr); bias_grad_ptr ++; } printf("\n");*/ for (int i = 0; i < kernel_size; i++) { printf("%f ", kernel_ptr[i]); } printf("\n"); /* for (int i = 0; i < kernel_grad_size; i++) { printf("%f ", kernel_grad_ptr); kernel_grad_ptr ++; } printf("\n"); */ runtime->unmap_region(ctx, kernel_region); // runtime->unmap_region(ctx, kernel_grad_region); runtime->unmap_region(ctx, bias_region); // runtime->unmap_region(ctx, bias_grad_region); #endif } cudnnConvolutionFwdAlgo_t selectConvolutionForwardAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t yDesc, void* y) { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx( handle, xDesc, x, wDesc, w, convDesc, yDesc, y, reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); printf("forwardAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time); return perfResults[0].algo; } cudnnConvolutionBwdFilterAlgo_t selectConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnFilterDescriptor_t dwDesc, void* dw) { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdFilterAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithmEx( handle, xDesc, x, dyDesc, dy, convDesc, dwDesc, dw, reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); printf("bwdFilterAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time); return perfResults[0].algo; } cudnnConvolutionBwdDataAlgo_t selectConvolutionBackwardDataAlgorithm(cudnnHandle_t handle, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, 
void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t dxDesc, void* dx) { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdDataAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithmEx( handle, wDesc, w, dyDesc, dy, convDesc, dxDesc, dx, reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); printf("bwdDataAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time); return perfResults[0].algo; } Conv2DMeta::Conv2DMeta(FFHandler handler) : OpMeta(handler) { checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc)); checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); } bool Conv2D::measure_compute_time(Simulator* sim, const ParallelConfig& pc, float& forward_time, float& backward_time) { Tensor sub_output, sub_input; if(!outputs[0].get_output_sub_tensor(pc, sub_output, OP_CONV2D)) return false; if(!inputs[0].get_input_sub_tensor(pc, sub_input, OP_CONV2D)) return false; int input_w = sub_input.adim[0]; int input_h = sub_input.adim[1]; int input_c = sub_input.adim[2]; int input_n = sub_input.adim[3]; int output_w = sub_output.adim[0]; int output_h = sub_output.adim[1]; int output_c = sub_output.adim[2]; int output_n = sub_output.adim[3]; int pad_h = ((output_h - 1) * stride_h + kernel_h - input_h + 1) / 2; int pad_w = ((output_w - 1) * stride_w + kernel_w - input_w + 1) / 2; Conv2DMeta* m = sim->conv2d_meta; m->relu = activation == AC_MODE_RELU; checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_c, 1, 1)); checkCUDNN(cudnnSetFilter4dDescriptor(m->filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, output_c, input_c, kernel_h, kernel_w)); checkCUDNN(cudnnSetConvolution2dDescriptor(m->convDesc, pad_h, pad_w, stride_h, stride_w, 1/*dilationH*/, 1/*dilationW*/, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH)); int n, c, h, w; checkCUDNN(cudnnGetConvolution2dForwardOutputDim(m->convDesc, m->inputTensor, m->filterDesc, &n, &c, &h, &w)); assert(n == output_n); assert(c == output_c); assert(h == output_h); assert(w == output_w); checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0.0)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w)); // allocate tensors in simulator sim->free_all(); float* input_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert(input_ptr != NULL); float *output_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert(output_ptr != NULL); float* weight_ptr = (float*)sim->allocate((size_t)output_c * input_c * kernel_h * kernel_w, DT_FLOAT); assert(weight_ptr != NULL); float* bias_ptr = (float*)sim->allocate(output_c, DT_FLOAT); assert(bias_ptr != NULL); // select forward algorithm { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx( m->handle.dnn, m->inputTensor, input_ptr, m->filterDesc, weight_ptr, m->convDesc, m->outputTensor, output_ptr, reqAlgCnt, &cnt, perfResults, 
m->handle.workSpace, m->handle.workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); forward_time = perfResults[0].time; //for (int i = 0; i < cnt; i++) // printf("conv forward: algo(%d) time(%.4lf)\n", perfResults[i].algo, perfResults[i].time); } // select backward filter algorithm { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdFilterAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithmEx( m->handle.dnn, m->inputTensor, input_ptr, m->outputTensor, output_ptr, m->convDesc, m->filterDesc, weight_ptr, reqAlgCnt, &cnt, perfResults, m->handle.workSpace, m->handle.workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); backward_time = perfResults[0].time; } // select backward data algorithm { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdDataAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithmEx( m->handle.dnn, m->filterDesc, weight_ptr, m->outputTensor, output_ptr, m->convDesc, m->inputTensor, input_ptr, reqAlgCnt, &cnt, perfResults, m->handle.workSpace, m->handle.workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); backward_time += perfResults[0].time; } printf("[Measure Conv2D] input(%d %d %d %d) output(%d %d %d %d) forward_time(%.4lf) backward_time(%.4lf)\n", input_n, input_c, input_h, input_w, output_n, output_c, output_h, output_w, forward_time, backward_time); return true; }
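// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the original sources): the
// backward pass above passes &alpha (1.0f) for both scaling arguments of
// cudnnConvolutionBackwardFilter/Bias/Data. cuDNN blends its result as
//     grad_out = alpha * computed_gradient + beta * grad_out
// so supplying 1.0f in the beta position accumulates new gradients into the
// existing buffers instead of overwriting them. The helper below shows the
// same idea for the bias gradient only; it assumes <cudnn.h> and the
// checkCUDNN macro used above, and the descriptor/pointer names are
// placeholders set up as in Conv2DMeta.
static void accumulate_bias_grad(cudnnHandle_t dnn,
                                 cudnnTensorDescriptor_t outputTensor,
                                 const float* output_grad_ptr,
                                 cudnnTensorDescriptor_t biasTensor,
                                 float* bias_grad_ptr,
                                 bool accumulate)
{
  const float alpha = 1.0f;
  // beta = 1.0f adds into bias_grad_ptr; beta = 0.0f overwrites it
  const float beta = accumulate ? 1.0f : 0.0f;
  checkCUDNN(cudnnConvolutionBackwardBias(dnn,
                                          &alpha, outputTensor, output_grad_ptr,
                                          &beta, biasTensor, bias_grad_ptr));
}
// ---------------------------------------------------------------------------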
28e8a17168e8e51eb280d3fb8fd6b9f3563b7bbc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* !===================================================================== ! ! S p e c f e m 3 D V e r s i o n 3 . 0 ! --------------------------------------- ! ! Main historical authors: Dimitri Komatitsch and Jeroen Tromp ! CNRS, France ! and Princeton University, USA ! (there are currently many more authors!) ! (c) October 2017 ! ! This program is free software; you can redistribute it and/or modify ! it under the terms of the GNU General Public License as published by ! the Free Software Foundation; either version 3 of the License, or ! (at your option) any later version. ! ! This program is distributed in the hope that it will be useful, ! but WITHOUT ANY WARRANTY; without even the implied warranty of ! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ! GNU General Public License for more details. ! ! You should have received a copy of the GNU General Public License along ! with this program; if not, write to the Free Software Foundation, Inc., ! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ! !===================================================================== */ #include "mesh_constants_cuda.h" /* ----------------------------------------------------------------------------------------------- */ #ifdef USE_TEXTURES_FIELDS realw_texture d_potential_tex; realw_texture d_potential_dot_dot_tex; //backward/reconstructed realw_texture d_b_potential_tex; realw_texture d_b_potential_dot_dot_tex; //note: texture variables are implicitly static, and cannot be passed as arguments to cuda kernels; // thus, 1) we thus use if-statements (FORWARD_OR_ADJOINT) to determine from which texture to fetch from // 2) we use templates // since if-statements are a bit slower as the variable is only known at runtime, we use option 2) // templates definitions template<int FORWARD_OR_ADJOINT> __device__ float texfetch_potential(int x); template<int FORWARD_OR_ADJOINT> __device__ float texfetch_potential_dot_dot(int x); // templates for texture fetching // FORWARD_OR_ADJOINT == 1 <- forward arrays template<> __device__ float texfetch_potential<1>(int x) { return tex1Dfetch(d_potential_tex, x); } template<> __device__ float texfetch_potential_dot_dot<1>(int x) { return tex1Dfetch(d_potential_dot_dot_tex, x); } // FORWARD_OR_ADJOINT == 3 <- backward/reconstructed arrays template<> __device__ float texfetch_potential<3>(int x) { return tex1Dfetch(d_b_potential_tex, x); } template<> __device__ float texfetch_potential_dot_dot<3>(int x) { return tex1Dfetch(d_b_potential_dot_dot_tex, x); } #endif #ifdef USE_TEXTURES_CONSTANTS extern realw_texture d_hprime_xx_tex; #endif // note on performance optimizations: // // instead of providing spezialized kernel routines (without mesh coloring, without gravity, etc.), // we only provide one "general" kernel to handle all cases. this reduces code redundancy and improves code readability. // as tradeoff, we take a little performance hit of around ~ 3% // // performance tests done: // - registers: we were trying to reduce the number of registers, as this is the main limiter for the // occupancy of the kernel. however, there is only little difference in register pressure for one "general" kernel // or multiple "spezialized" kernels. reducing registers is mainly achieved through the launch_bonds() directive. // - branching: we were trying to reduce code branches, such as the if-active check in earlier code versions. 
// reducing the branching helps the compiler to better optimize the executable. // - memory accesses: the global memory accesses are avoiding texture reads for coalescent arrays, as this is // still faster. thus we were using no __ldg() loads or __restricted__ pointer usage, // as those implicitly lead the compiler to use texture reads. // - arithmetic intensity: ratio of floating-point operations vs. memory accesses is still low for our kernels. // tests with using a loop over elements to re-use the constant arrays (like hprime, wgllwgll,..) and thus // increasing the arithmetic intensity failed because the number of registers increased as well. // this increased register pressure reduced the occupancy and slowed down the kernel performance. // - hiding memory latency: to minimize waiting times to retrieve a memory value from global memory, we put // some more calculations into the same code block before calling syncthreads(). this should help the // compiler to move independent calculations to wherever it can overlap it with memory access operations. // note, especially the if (gravity )-block locations are very sensitive // for optimal register usage and compiler optimizations // /* ----------------------------------------------------------------------------------------------- */ // KERNEL 2 - acoustic compute forces kernel /* ----------------------------------------------------------------------------------------------- */ template<int FORWARD_OR_ADJOINT> __global__ void #ifdef USE_LAUNCH_BOUNDS // adds compiler specification __launch_bounds__(NGLL3_PADDED,LAUNCH_MIN_BLOCKS_ACOUSTIC) #endif Kernel_2_acoustic_impl(const int nb_blocks_to_compute, const int* d_ibool, const int* d_irregular_element_number, const int* d_phase_ispec_inner_acoustic, const int num_phase_ispec_acoustic, const int d_iphase, field_const_p d_potential_acoustic, field_p d_potential_dot_dot_acoustic, field_const_p d_b_potential_acoustic, field_p d_b_potential_dot_dot_acoustic, const int nb_field, realw* d_xix,realw* d_xiy,realw* d_xiz, realw* d_etax,realw* d_etay,realw* d_etaz, realw* d_gammax,realw* d_gammay,realw* d_gammaz, const realw xix_regular, const realw jacobian_regular, realw_const_p d_hprime_xx, realw_const_p hprimewgll_xx, realw_const_p wgllwgll_xy,realw_const_p wgllwgll_xz,realw_const_p wgllwgll_yz, realw* d_rhostore, const int use_mesh_coloring_gpu, const int gravity, realw_const_p minus_g, realw* d_kappastore, realw_const_p wgll_cube){ // block-id == number of local element id in phase_ispec array int bx = blockIdx.y*gridDim.x+blockIdx.x; // thread-id == GLL node id // note: use only NGLL^3 = 125 active threads, plus 3 inactive/ghost threads, // because we used memory padding from NGLL^3 = 125 to 128 to get coalescent memory accesses; // to avoid execution branching and the need of registers to store an active state variable, // the thread ids are put in valid range int tx = threadIdx.x; int I,J,K; int iglob,offset; int working_element,ispec_irreg; field temp1l,temp2l,temp3l; realw xixl,xiyl,xizl,etaxl,etayl,etazl,gammaxl,gammayl,gammazl; realw jacobianl; field dpotentialdxl,dpotentialdyl,dpotentialdzl; realw fac1,fac2,fac3; realw rho_invl,kappa_invl; field sum_terms; field gravity_term; __shared__ field s_dummy_loc[2*NGLL3]; __shared__ field s_temp1[NGLL3]; __shared__ field s_temp2[NGLL3]; __shared__ field s_temp3[NGLL3]; __shared__ realw sh_hprime_xx[NGLL2]; __shared__ realw sh_hprimewgll_xx[NGLL2]; // arithmetic intensity: ratio of number-of-arithmetic-operations / number-of-bytes-accessed-on-DRAM 
// // hand-counts on floating-point operations: counts addition/subtraction/multiplication/division // no counts for operations on indices in for-loops (compiler will likely unrool loops) // // counts accesses to global memory, but no shared memory or register loads/stores // float has 4 bytes // counts: for simulations without gravity, without mesh_coloring // counts floating-point operations (FLOP) per thread // counts global memory accesses in bytes (BYTES) per block // 2 FLOP // // 0 BYTES // checks if anything to do if (bx >= nb_blocks_to_compute) return; // limits thread ids to range [0,125-1] if (tx >= NGLL3) tx = NGLL3-1; // counts: // + 1 FLOP // // + 0 BYTE // spectral-element id #ifdef USE_MESH_COLORING_GPU working_element = bx; #else //mesh coloring if (use_mesh_coloring_gpu ){ working_element = bx; }else{ // iphase-1 and working_element-1 for Fortran->C array conventions working_element = d_phase_ispec_inner_acoustic[bx + num_phase_ispec_acoustic*(d_iphase-1)]-1; } #endif // local padded index offset = working_element*NGLL3_PADDED + tx; ispec_irreg = d_irregular_element_number[working_element] -1; // global index iglob = d_ibool[offset] - 1; // counts: // + 8 FLOP // // (1 int + 2 float) * 128 threads = 1536 BYTE // loads potential values into shared memory if (threadIdx.x < NGLL3) { #ifdef USE_TEXTURES_FIELDS s_dummy_loc[tx] = texfetch_potential<FORWARD_OR_ADJOINT>(iglob); if (nb_field==2) s_dummy_loc[NGLL3+tx] = texfetch_potential<3>(iglob); #else // changing iglob indexing to match fortran row changes fast style s_dummy_loc[tx] = d_potential_acoustic[iglob]; if (nb_field==2) s_dummy_loc[NGLL3+tx] = d_b_potential_acoustic[iglob]; #endif } // counts: // + 0 FLOP // // + 1 float * 125 threads = 500 BYTE // gravity if (gravity ) kappa_invl = 1.f / d_kappastore[working_element*NGLL3 + tx]; // local index K = (tx/NGLL2); J = ((tx-K*NGLL2)/NGLLX); I = (tx-K*NGLL2-J*NGLLX); // counts: // + 8 FLOP // // + 0 BYTES // note: loads mesh values here to give compiler possibility to overlap memory fetches with some computations; // arguments defined as realw* instead of const realw* __restrict__ to avoid that the compiler // loads all memory by texture loads (arrays accesses are coalescent, thus no need for texture reads) // // calculates laplacian if (ispec_irreg >= 0){ //irregular_element int offset = ispec_irreg*NGLL3_PADDED + tx; xixl = d_xix[offset]; xiyl = d_xiy[offset]; xizl = d_xiz[offset]; etaxl = d_etax[offset]; etayl = d_etay[offset]; etazl = d_etaz[offset]; gammaxl = d_gammax[offset]; gammayl = d_gammay[offset]; gammazl = d_gammaz[offset]; jacobianl = 1.f / (xixl*(etayl*gammazl-etazl*gammayl) -xiyl*(etaxl*gammazl-etazl*gammaxl) +xizl*(etaxl*gammayl-etayl*gammaxl)); } // density (reciproc) rho_invl = 1.f / d_rhostore[offset]; // counts: // + 16 FLOP // // + 10 float * 128 threads = 5120 BYTE // loads hprime into shared memory if (tx < NGLL2) { #ifdef USE_TEXTURES_CONSTANTS sh_hprime_xx[tx] = tex1Dfetch(d_hprime_xx_tex,tx); #else sh_hprime_xx[tx] = d_hprime_xx[tx]; #endif // loads hprimewgll into shared memory sh_hprimewgll_xx[tx] = hprimewgll_xx[tx]; } // counts: // + 0 FLOP // // + 2 * 1 float * 25 threads = 200 BYTE // synchronize all the threads (one thread for each of the NGLL grid points of the // current spectral element) because we need the whole element to be ready in order // to be able to compute the matrix products along cut planes of the 3D element below __syncthreads(); // summed terms with added gll weights fac1 = wgllwgll_yz[K*NGLLX+J]; fac2 = 
wgllwgll_xz[K*NGLLX+I]; fac3 = wgllwgll_xy[J*NGLLX+I]; // We make a loop over direct and adjoint wavefields inside the GPU kernel to increase arithmetic intensity for (int k = 0 ; k < nb_field ; k++){ // computes first matrix product temp1l = Make_field(0.f); temp2l = Make_field(0.f); temp3l = Make_field(0.f); for (int l=0;l<NGLLX;l++) { //assumes that hprime_xx = hprime_yy = hprime_zz // 1. cut-plane along xi-direction temp1l += s_dummy_loc[NGLL3*k+K*NGLL2+J*NGLLX+l] * sh_hprime_xx[l*NGLLX+I]; // 2. cut-plane along eta-direction temp2l += s_dummy_loc[NGLL3*k+K*NGLL2+l*NGLLX+I] * sh_hprime_xx[l*NGLLX+J]; // 3. cut-plane along gamma-direction temp3l += s_dummy_loc[NGLL3*k+l*NGLL2+J*NGLLX+I] * sh_hprime_xx[l*NGLLX+K]; } // counts: // + NGLLX * 3 * 8 FLOP = 120 FLOP // // + 0 BYTE // compute derivatives of ux, uy and uz with respect to x, y and z // derivatives of potential if (threadIdx.x < NGLL3) { if (ispec_irreg >= 0){ //irregular_element dpotentialdxl = xixl*temp1l + etaxl*temp2l + gammaxl*temp3l; dpotentialdyl = xiyl*temp1l + etayl*temp2l + gammayl*temp3l; dpotentialdzl = xizl*temp1l + etazl*temp2l + gammazl*temp3l; // counts: // + 3 * 5 FLOP = 15 FLOP // // + 0 BYTE // form the dot product with the test vector s_temp1[tx] = jacobianl * rho_invl * (dpotentialdxl*xixl + dpotentialdyl*xiyl + dpotentialdzl*xizl); s_temp2[tx] = jacobianl * rho_invl * (dpotentialdxl*etaxl + dpotentialdyl*etayl + dpotentialdzl*etazl); s_temp3[tx] = jacobianl * rho_invl * (dpotentialdxl*gammaxl + dpotentialdyl*gammayl + dpotentialdzl*gammazl); } else{ s_temp1[tx] = jacobian_regular * rho_invl * temp1l * xix_regular * xix_regular; s_temp2[tx] = jacobian_regular * rho_invl * temp2l * xix_regular * xix_regular; s_temp3[tx] = jacobian_regular * rho_invl * temp3l * xix_regular * xix_regular; } } // pre-computes gravity sum term if (gravity ){ // uses potential definition: s = grad(chi) // // gravity term: 1/kappa grad(chi) * g // assumes that g only acts in (negative) z-direction gravity_term = minus_g[iglob] * kappa_invl * jacobianl * wgll_cube[tx] * dpotentialdzl; } // counts: // + 3 * 7 FLOP = 21 FLOP // // + 0 BYTE // synchronize all the threads (one thread for each of the NGLL grid points of the // current spectral element) because we need the whole element to be ready in order // to be able to compute the matrix products along cut planes of the 3D element below __syncthreads(); // computes second matrix product temp1l = Make_field(0.f); temp2l = Make_field(0.f); temp3l = Make_field(0.f); for (int l=0;l<NGLLX;l++) { //assumes hprimewgll_xx = hprimewgll_yy = hprimewgll_zz // 1. cut-plane along xi-direction temp1l += s_temp1[K*NGLL2+J*NGLLX+l] * sh_hprimewgll_xx[I*NGLLX+l]; // 2. cut-plane along eta-direction temp2l += s_temp2[K*NGLL2+l*NGLLX+I] * sh_hprimewgll_xx[J*NGLLX+l]; // 3. 
cut-plane along gamma-direction temp3l += s_temp3[l*NGLL2+J*NGLLX+I] * sh_hprimewgll_xx[K*NGLLX+l]; } // counts: // + NGLLX * 3 * 8 FLOP = 120 FLOP // // + 0 BYTE sum_terms = -(fac1*temp1l + fac2*temp2l + fac3*temp3l); // adds gravity contribution if (gravity) sum_terms += gravity_term; // counts: // + 3 * 2 FLOP + 6 FLOP = 12 FLOP // // + 3 float * 128 threads = 1536 BYTE __syncthreads(); // assembles potential array if (threadIdx.x < NGLL3) { #ifdef USE_MESH_COLORING_GPU // no atomic operation needed, colors don't share global points between elements #ifdef USE_TEXTURES_FIELDS if (k==0) d_potential_dot_dot_acoustic[iglob] = texfetch_potential_dot_dot<FORWARD_OR_ADJOINT>(iglob) + sum_terms; if (k==1) d_b_potential_dot_dot_acoustic[iglob] = texfetch_potential_dot_dot<3>(iglob) + sum_terms; #else if (k==0) d_potential_dot_dot_acoustic[iglob] += sum_terms; if (k==1) d_b_potential_dot_dot_acoustic[iglob] += sum_terms; #endif // USE_TEXTURES_FIELDS #else // MESH_COLORING //mesh coloring if (use_mesh_coloring_gpu ){ // no atomic operation needed, colors don't share global points between elements #ifdef USE_TEXTURES_FIELDS if (k==0) d_potential_dot_dot_acoustic[iglob] = texfetch_potential_dot_dot<FORWARD_OR_ADJOINT>(iglob) + sum_terms; if (k==1) d_b_potential_dot_dot_acoustic[iglob] = texfetch_potential_dot_dot<3>(iglob) + sum_terms; #else if (k==0) d_potential_dot_dot_acoustic[iglob] += sum_terms; if (k==1) d_b_potential_dot_dot_acoustic[iglob] += sum_terms; #endif // USE_TEXTURES_FIELDS }else{ if (k==0) atomicAdd(&d_potential_dot_dot_acoustic[iglob],sum_terms); if (k==1) atomicAdd(&d_b_potential_dot_dot_acoustic[iglob],sum_terms); } #endif // MESH_COLORING } } //loop over k (forward and adjoint wavefield) // counts: // + 1 FLOP // // + 1 float * 125 threads = 500 BYTE // ----------------- // total of: 323 FLOP per thread // ~ 128 * 323 = 41344 FLOP per block // // 8880 BYTE DRAM accesses per block // // -> arithmetic intensity: 41344 FLOP / 8880 BYTES ~ 4.66 FLOP/BYTE (hand-count) // // ----------------- // // nvprof: nvprof --metrics flops_sp ./xspecfem3D // -> 322631424 FLOPS (Single) floating-point operations for 20736 elements // -> 15559 FLOP per block // // -> arithmetic intensity: ~ 15559 / 8880 flop/byte = 1.75 flop/byte // // roofline model: Tesla K20x // --------------------------- // for a Kepler K20x card, the peak single-precision performance is about 3.95 TFlop/s. // global memory access has a bandwidth of ~ 250 GB/s. // thus there should be about 16 flop to hide a single byte memory access (3950./250. ~ 15.8 flop/byte = arithmetic intensity). // // memory bandwidth: 250 GB/s // single-precision peak performance: 3.95 TFlop/s -> corner arithmetic intensity = 3950 / 250 ~ 15.8 flop/byte // // note: // using dense matrix-matrix multiplication (SGEMM) leads to "practical" peak performance of around 2.9 TFlops. // (http://www.nvidia.com/docs/IO/122874/K20-and-K20X-application-performance-technical-brief.pdf) // // acoustic kernel has an arithmetic intensity of: hand-counts ~ 4.66 flop/byte // nvprof-counts ~ 1.75 flop/byte // // -> we can only achieve about: (hand-counts) 29% of the peak performance // (nvprof-counts) 11% of the peak performance // // i.e. 11% x theoretical peak performance ~ 440 GFlop/s. // 11% x "pratical" peak performance ~ 320 GFlop/s. // // CUDA_TIMING: we achieve about 224 GFlop/s (1 mpi process, 20736 elements) // -> that is about 8% of the "practical" peak. 
(or 70% of the theoretical arithmetic intensity) // // this might be due to the first compute code block (before first syncthreads), where // the partial arithmetic intensity is lower than for the total routine. // // roofline model: Tesla K20c (Kepler architecture: http://www.nvidia.com/content/tesla/pdf/Tesla-KSeries-Overview-LR.pdf) // --------------------------- // memory bandwidth: 208 GB/s // single-precision peak performance: 3.52 TFlop/s -> corner arithmetic intensity = 3520 / 208 ~ 16.9 flop/byte // // we can only achieve about: (hand-counts) 27% of the peak performance -> 970.6 GFlop/s // (nvprof-counts) 10% of the peak performance -> 364.5 GFlop/s - measured: 229.631 GFlop/s // // roofline model: nVidia GT 650m http://www.gpuzoo.com/GPU-NVIDIA/GeForce_GT_650M_DDR3.html // --------------------------- // memory bandwidth: 28.8 GB/s // single-precision peak performance: 625.6 GFlop/s -> corner arithmetic intensity = 625.6 / 28.8 ~ 21.7 flop/byte // // we can only achieve about: (hand-counts) 21% of the peak performance -> 132.6 GFlop/s // (nvprof-counts) 8% of the peak performance -> 50.5 GFlop/s - measured: 52.1907 GFlop/s // // // // better performance ideas and improvements are welcome :) } /* ----------------------------------------------------------------------------------------------- */ //Reference kernel, solving a single wavefield /* template<int FORWARD_OR_ADJOINT> __global__ void #ifdef USE_LAUNCH_BOUNDS // adds compiler specification __launch_bounds__(NGLL3_PADDED,LAUNCH_MIN_BLOCKS_ACOUSTIC) #endif Kernel_2_acoustic_impl(const int nb_blocks_to_compute, const int* d_ibool, const int* d_phase_ispec_inner_acoustic, const int num_phase_ispec_acoustic, const int d_iphase, realw_const_p d_potential_acoustic, realw_p d_potential_dot_dot_acoustic, realw* d_xix,realw* d_xiy,realw* d_xiz, realw* d_etax,realw* d_etay,realw* d_etaz, realw* d_gammax,realw* d_gammay,realw* d_gammaz, realw_const_p d_hprime_xx, realw_const_p hprimewgll_xx, realw_const_p wgllwgll_xy,realw_const_p wgllwgll_xz,realw_const_p wgllwgll_yz, realw* d_rhostore, const int use_mesh_coloring_gpu, const int gravity, realw_const_p minus_g, realw* d_kappastore, realw_const_p wgll_cube){ // block-id == number of local element id in phase_ispec array int bx = blockIdx.y*gridDim.x+blockIdx.x; // thread-id == GLL node id // note: use only NGLL^3 = 125 active threads, plus 3 inactive/ghost threads, // because we used memory padding from NGLL^3 = 125 to 128 to get coalescent memory accesses; // to avoid execution branching and the need of registers to store an active state variable, // the thread ids are put in valid range int tx = threadIdx.x; int I,J,K; int iglob,offset; int working_element; realw temp1l,temp2l,temp3l; realw xixl,xiyl,xizl,etaxl,etayl,etazl,gammaxl,gammayl,gammazl; realw jacobianl; realw dpotentialdxl,dpotentialdyl,dpotentialdzl; realw fac1,fac2,fac3; realw rho_invl,kappa_invl; realw sum_terms; realw gravity_term; __shared__ realw s_dummy_loc[NGLL3]; __shared__ realw s_temp1[NGLL3]; __shared__ realw s_temp2[NGLL3]; __shared__ realw s_temp3[NGLL3]; __shared__ realw sh_hprime_xx[NGLL2]; __shared__ realw sh_hprimewgll_xx[NGLL2]; // arithmetic intensity: ratio of number-of-arithmetic-operations / number-of-bytes-accessed-on-DRAM // // hand-counts on floating-point operations: counts addition/subtraction/multiplication/division // no counts for operations on indices in for-loops (compiler will likely unrool loops) // // counts accesses to global memory, but no shared memory or register loads/stores // float 
has 4 bytes // counts: for simulations without gravity, without mesh_coloring // counts floating-point operations (FLOP) per thread // counts global memory accesses in bytes (BYTES) per block // 2 FLOP // // 0 BYTES // checks if anything to do if (bx >= nb_blocks_to_compute) return; // limits thread ids to range [0,125-1] if (tx >= NGLL3) tx = NGLL3-1; // counts: // + 1 FLOP // // + 0 BYTE // spectral-element id #ifdef USE_MESH_COLORING_GPU working_element = bx; #else //mesh coloring if (use_mesh_coloring_gpu ){ working_element = bx; }else{ // iphase-1 and working_element-1 for Fortran->C array conventions working_element = d_phase_ispec_inner_acoustic[bx + num_phase_ispec_acoustic*(d_iphase-1)]-1; } #endif // local padded index offset = working_element*NGLL3_PADDED + tx; // global index iglob = d_ibool[offset] - 1; // counts: // + 7 FLOP // // + 2 float * 128 threads = 1024 BYTE // loads potential values into shared memory if (threadIdx.x < NGLL3) { #ifdef USE_TEXTURES_FIELDS s_dummy_loc[tx] = texfetch_potential<FORWARD_OR_ADJOINT>(iglob); #else // changing iglob indexing to match fortran row changes fast style s_dummy_loc[tx] = d_potential_acoustic[iglob]; #endif } // counts: // + 0 FLOP // // + 1 float * 125 threads = 500 BYTE // gravity if (gravity ){ kappa_invl = 1.f / d_kappastore[working_element*NGLL3 + tx]; } // local index K = (tx/NGLL2); J = ((tx-K*NGLL2)/NGLLX); I = (tx-K*NGLL2-J*NGLLX); // counts: // + 8 FLOP // // + 0 BYTES // note: loads mesh values here to give compiler possibility to overlap memory fetches with some computations; // arguments defined as realw* instead of const realw* __restrict__ to avoid that the compiler // loads all memory by texture loads (arrays accesses are coalescent, thus no need for texture reads) // // calculates laplacian xixl = d_xix[offset]; xiyl = d_xiy[offset]; xizl = d_xiz[offset]; etaxl = d_etax[offset]; etayl = d_etay[offset]; etazl = d_etaz[offset]; gammaxl = d_gammax[offset]; gammayl = d_gammay[offset]; gammazl = d_gammaz[offset]; jacobianl = 1.f / (xixl*(etayl*gammazl-etazl*gammayl) -xiyl*(etaxl*gammazl-etazl*gammaxl) +xizl*(etaxl*gammayl-etayl*gammaxl)); // density (reciproc) rho_invl = 1.f / d_rhostore[offset]; // counts: // + 16 FLOP // // + 10 float * 128 threads = 5120 BYTE // loads hprime into shared memory if (tx < NGLL2) { #ifdef USE_TEXTURES_CONSTANTS sh_hprime_xx[tx] = tex1Dfetch(d_hprime_xx_tex,tx); #else sh_hprime_xx[tx] = d_hprime_xx[tx]; #endif // loads hprimewgll into shared memory sh_hprimewgll_xx[tx] = hprimewgll_xx[tx]; } // counts: // + 0 FLOP // // + 2 * 1 float * 25 threads = 200 BYTE // synchronize all the threads (one thread for each of the NGLL grid points of the // current spectral element) because we need the whole element to be ready in order // to be able to compute the matrix products along cut planes of the 3D element below __syncthreads(); // computes first matrix product temp1l = 0.f; temp2l = 0.f; temp3l = 0.f; for (int l=0;l<NGLLX;l++) { //assumes that hprime_xx = hprime_yy = hprime_zz // 1. cut-plane along xi-direction temp1l += s_dummy_loc[K*NGLL2+J*NGLLX+l] * sh_hprime_xx[l*NGLLX+I]; // 2. cut-plane along eta-direction temp2l += s_dummy_loc[K*NGLL2+l*NGLLX+I] * sh_hprime_xx[l*NGLLX+J]; // 3. 
cut-plane along gamma-direction temp3l += s_dummy_loc[l*NGLL2+J*NGLLX+I] * sh_hprime_xx[l*NGLLX+K]; } // counts: // + NGLLX * 3 * 8 FLOP = 120 FLOP // // + 0 BYTE // compute derivatives of ux, uy and uz with respect to x, y and z // derivatives of potential dpotentialdxl = xixl*temp1l + etaxl*temp2l + gammaxl*temp3l; dpotentialdyl = xiyl*temp1l + etayl*temp2l + gammayl*temp3l; dpotentialdzl = xizl*temp1l + etazl*temp2l + gammazl*temp3l; // counts: // + 3 * 5 FLOP = 15 FLOP // // + 0 BYTE // form the dot product with the test vector if (threadIdx.x < NGLL3) { s_temp1[tx] = jacobianl * rho_invl * (dpotentialdxl*xixl + dpotentialdyl*xiyl + dpotentialdzl*xizl); s_temp2[tx] = jacobianl * rho_invl * (dpotentialdxl*etaxl + dpotentialdyl*etayl + dpotentialdzl*etazl); s_temp3[tx] = jacobianl * rho_invl * (dpotentialdxl*gammaxl + dpotentialdyl*gammayl + dpotentialdzl*gammazl); } // pre-computes gravity sum term if (gravity ){ // uses potential definition: s = grad(chi) // // gravity term: 1/kappa grad(chi) * g // assumes that g only acts in (negative) z-direction gravity_term = minus_g[iglob] * kappa_invl * jacobianl * wgll_cube[tx] * dpotentialdzl; } // counts: // + 3 * 7 FLOP = 21 FLOP // // + 0 BYTE // synchronize all the threads (one thread for each of the NGLL grid points of the // current spectral element) because we need the whole element to be ready in order // to be able to compute the matrix products along cut planes of the 3D element below __syncthreads(); // computes second matrix product temp1l = 0.f; temp2l = 0.f; temp3l = 0.f; for (int l=0;l<NGLLX;l++) { //assumes hprimewgll_xx = hprimewgll_yy = hprimewgll_zz // 1. cut-plane along xi-direction temp1l += s_temp1[K*NGLL2+J*NGLLX+l] * sh_hprimewgll_xx[I*NGLLX+l]; // 2. cut-plane along eta-direction temp2l += s_temp2[K*NGLL2+l*NGLLX+I] * sh_hprimewgll_xx[J*NGLLX+l]; // 3. 
cut-plane along gamma-direction temp3l += s_temp3[l*NGLL2+J*NGLLX+I] * sh_hprimewgll_xx[K*NGLLX+l]; } // counts: // + NGLLX * 3 * 8 FLOP = 120 FLOP // // + 0 BYTE // summed terms with added gll weights fac1 = wgllwgll_yz[K*NGLLX+J]; fac2 = wgllwgll_xz[K*NGLLX+I]; fac3 = wgllwgll_xy[J*NGLLX+I]; sum_terms = -(fac1*temp1l + fac2*temp2l + fac3*temp3l); // adds gravity contribution if (gravity) sum_terms += gravity_term; // counts: // + 3 * 2 FLOP + 6 FLOP = 12 FLOP // // + 3 float * 128 threads = 1536 BYTE // assembles potential array if (threadIdx.x < NGLL3) { #ifdef USE_MESH_COLORING_GPU // no atomic operation needed, colors don't share global points between elements #ifdef USE_TEXTURES_FIELDS d_potential_dot_dot_acoustic[iglob] = texfetch_potential_dot_dot<FORWARD_OR_ADJOINT>(iglob) + sum_terms; #else d_potential_dot_dot_acoustic[iglob] += sum_terms; #endif // USE_TEXTURES_FIELDS #else // MESH_COLORING //mesh coloring if (use_mesh_coloring_gpu ){ // no atomic operation needed, colors don't share global points between elements #ifdef USE_TEXTURES_FIELDS d_potential_dot_dot_acoustic[iglob] = texfetch_potential_dot_dot<FORWARD_OR_ADJOINT>(iglob) + sum_terms; #else d_potential_dot_dot_acoustic[iglob] += sum_terms; #endif // USE_TEXTURES_FIELDS }else{ atomicAdd(&d_potential_dot_dot_acoustic[iglob],sum_terms); } #endif // MESH_COLORING } // kernel useful for optimization: stripped-down version // acoustic kernel without gravity and without mesh coloring template<int FORWARD_OR_ADJOINT> __global__ void #ifdef USE_LAUNCH_BOUNDS // adds compiler specification __launch_bounds__(NGLL3_PADDED,LAUNCH_MIN_BLOCKS_ACOUSTIC) #endif Kernel_2_acoustic_perf_impl(const int nb_blocks_to_compute, const int* d_ibool, const int* d_phase_ispec_inner_acoustic, const int num_phase_ispec_acoustic, const int d_iphase, realw_const_p d_potential_acoustic, realw_p d_potential_dot_dot_acoustic, realw* d_xix,realw* d_xiy,realw* d_xiz, realw* d_etax,realw* d_etay,realw* d_etaz, realw* d_gammax,realw* d_gammay,realw* d_gammaz, realw_const_p d_hprime_xx, realw_const_p hprimewgll_xx, realw_const_p wgllwgll_xy,realw_const_p wgllwgll_xz,realw_const_p wgllwgll_yz, realw* d_rhostore, const int use_mesh_coloring_gpu, const int gravity, realw_const_p minus_g, realw* d_kappastore, realw_const_p wgll_cube){ // note: this routine is using only 12 active blocks instead of full occupancy (16 active blocks) // due to small register spilling which slows down performance // timing: ~ 1.41 ms (Kepler: Tesla K20c) // block-id == number of local element id in phase_ispec array int bx = blockIdx.y*gridDim.x+blockIdx.x; // thread-id == GLL node id // note: use only NGLL^3 = 125 active threads, plus 3 inactive/ghost threads, // because we used memory padding from NGLL^3 = 125 to 128 to get coalescent memory accesses; // to avoid execution branching and the need of registers to store an active state variable, // the thread ids are put in valid range int tx = threadIdx.x; int I,J,K; int iglob,offset; int working_element; realw temp1l,temp2l,temp3l; realw xixl,xiyl,xizl,etaxl,etayl,etazl,gammaxl,gammayl,gammazl,jacobianl; realw dpotentialdxl,dpotentialdyl,dpotentialdzl; realw fac1,fac2,fac3; realw rho_invl; realw sum_terms; __shared__ realw s_dummy_loc[NGLL3]; __shared__ realw s_temp1[NGLL3]; __shared__ realw s_temp2[NGLL3]; __shared__ realw s_temp3[NGLL3]; __shared__ realw sh_hprime_xx[NGLL2]; __shared__ realw sh_hprimewgll_xx[NGLL2]; // checks if anything to do if (bx >= nb_blocks_to_compute) return; // limits thread ids to range [0,125-1] if (tx >= 
NGLL3) tx = NGLL3 - 1; // spectral-element id // iphase-1 and working_element-1 for Fortran->C array conventions working_element = d_phase_ispec_inner_acoustic[bx + num_phase_ispec_acoustic*(d_iphase-1)] - 1; // local padded index offset = working_element*NGLL3_PADDED + tx; // global index iglob = d_ibool[offset] - 1; // loads potential values into shared memory if (threadIdx.x < NGLL3) { // loads potentials #ifdef USE_TEXTURES_FIELDS s_dummy_loc[tx] = texfetch_potential<FORWARD_OR_ADJOINT>(iglob); #else // changing iglob indexing to match fortran row changes fast style s_dummy_loc[tx] = d_potential_acoustic[iglob]; #endif } // local index K = (tx/NGLL2); J = ((tx-K*NGLL2)/NGLLX); I = (tx-K*NGLL2-J*NGLLX); // loads mesh values here to give compiler possibility to overlap memory fetches with some computations // note: arguments defined as realw* instead of const realw* __restrict__ to avoid that the compiler // loads all memory by texture loads // we only use the first loads explicitly by texture loads, all subsequent without. this should lead/trick // the compiler to use global memory loads for all the subsequent accesses. // // calculates laplacian //xixl = get_global_cr( &d_xix[offset] ); // first array with texture load xixl = d_xix[offset]; xiyl = d_xiy[offset]; // all subsequent without to avoid over-use of texture for coalescent access xizl = d_xiz[offset]; etaxl = d_etax[offset]; etayl = d_etay[offset]; etazl = d_etaz[offset]; gammaxl = d_gammax[offset]; gammayl = d_gammay[offset]; gammazl = d_gammaz[offset]; jacobianl = 1.f / (xixl*(etayl*gammazl-etazl*gammayl) -xiyl*(etaxl*gammazl-etazl*gammaxl) +xizl*(etaxl*gammayl-etayl*gammaxl)); // density (reciproc) rho_invl = 1.f / d_rhostore[offset]; // loads hprime into shared memory if (tx < NGLL2) { #ifdef USE_TEXTURES_CONSTANTS sh_hprime_xx[tx] = tex1Dfetch(d_hprime_xx_tex,tx); #else sh_hprime_xx[tx] = d_hprime_xx[tx]; #endif // loads hprimewgll into shared memory sh_hprimewgll_xx[tx] = hprimewgll_xx[tx]; } // synchronize all the threads (one thread for each of the NGLL grid points of the // current spectral element) because we need the whole element to be ready in order // to be able to compute the matrix products along cut planes of the 3D element below __syncthreads(); // computes first matrix product temp1l = 0.f; temp2l = 0.f; temp3l = 0.f; for (int l=0;l<NGLLX;l++) { //assumes that hprime_xx = hprime_yy = hprime_zz // 1. cut-plane along xi-direction temp1l += s_dummy_loc[K*NGLL2+J*NGLLX+l] * sh_hprime_xx[l*NGLLX+I]; // 2. cut-plane along eta-direction temp2l += s_dummy_loc[K*NGLL2+l*NGLLX+I] * sh_hprime_xx[l*NGLLX+J]; // 3. 
cut-plane along gamma-direction temp3l += s_dummy_loc[l*NGLL2+J*NGLLX+I] * sh_hprime_xx[l*NGLLX+K]; } // compute derivatives of ux, uy and uz with respect to x, y and z // derivatives of potential dpotentialdxl = xixl*temp1l + etaxl*temp2l + gammaxl*temp3l; dpotentialdyl = xiyl*temp1l + etayl*temp2l + gammayl*temp3l; dpotentialdzl = xizl*temp1l + etazl*temp2l + gammazl*temp3l; // form the dot product with the test vector if (threadIdx.x < NGLL3) { s_temp1[tx] = jacobianl * rho_invl * (dpotentialdxl*xixl + dpotentialdyl*xiyl + dpotentialdzl*xizl); s_temp2[tx] = jacobianl * rho_invl * (dpotentialdxl*etaxl + dpotentialdyl*etayl + dpotentialdzl*etazl); s_temp3[tx] = jacobianl * rho_invl * (dpotentialdxl*gammaxl + dpotentialdyl*gammayl + dpotentialdzl*gammazl); } // synchronize all the threads (one thread for each of the NGLL grid points of the // current spectral element) because we need the whole element to be ready in order // to be able to compute the matrix products along cut planes of the 3D element below __syncthreads(); // computes second matrix product temp1l = 0.f; temp2l = 0.f; temp3l = 0.f; for (int l=0;l<NGLLX;l++) { //assumes hprimewgll_xx = hprimewgll_yy = hprimewgll_zz // 1. cut-plane along xi-direction temp1l += s_temp1[K*NGLL2+J*NGLLX+l] * sh_hprimewgll_xx[I*NGLLX+l]; // 2. cut-plane along eta-direction temp2l += s_temp2[K*NGLL2+l*NGLLX+I] * sh_hprimewgll_xx[J*NGLLX+l]; // 3. cut-plane along gamma-direction temp3l += s_temp3[l*NGLL2+J*NGLLX+I] * sh_hprimewgll_xx[K*NGLLX+l]; } // summed terms with added gll weights fac1 = wgllwgll_yz[K*NGLLX+J]; fac2 = wgllwgll_xz[K*NGLLX+I]; fac3 = wgllwgll_xy[J*NGLLX+I]; sum_terms = -(fac1*temp1l + fac2*temp2l + fac3*temp3l); // assembles potential array if (threadIdx.x < NGLL3) { atomicAdd(&d_potential_dot_dot_acoustic[iglob],sum_terms); } } */ /* ----------------------------------------------------------------------------------------------- */ void Kernel_2_acoustic(int nb_blocks_to_compute, Mesh* mp, int d_iphase, int* d_ibool, realw* d_xix,realw* d_xiy,realw* d_xiz, realw* d_etax,realw* d_etay,realw* d_etaz, realw* d_gammax,realw* d_gammay,realw* d_gammaz, realw* d_rhostore, realw* d_kappastore){ #ifdef ENABLE_VERY_SLOW_ERROR_CHECKING exit_on_cuda_error("before acoustic kernel Kernel 2"); #endif // if the grid can handle the number of blocks, we let it be 1D // grid_2_x = nb_elem_color; // nb_elem_color is just how many blocks we are computing now int blocksize = NGLL3_PADDED; int num_blocks_x, num_blocks_y; get_blocks_xy(nb_blocks_to_compute,&num_blocks_x,&num_blocks_y); dim3 grid(num_blocks_x,num_blocks_y); dim3 threads(blocksize,1,1); // Cuda timing hipEvent_t start, stop; if (CUDA_TIMING ){ start_timing_cuda(&start,&stop); } int nb_field = mp->simulation_type == 3 ? 
2 : 1 ; //This kernel treats both forward and adjoint wavefield within the same call, to increase performance ( ~37% faster for pure acoustic simulations ) hipLaunchKernelGGL(( Kernel_2_acoustic_impl<1>), dim3(grid),dim3(threads),0,mp->compute_stream, nb_blocks_to_compute, d_ibool, mp->d_irregular_element_number, mp->d_phase_ispec_inner_acoustic, mp->num_phase_ispec_acoustic, d_iphase, mp->d_potential_acoustic, mp->d_potential_dot_dot_acoustic, mp->d_b_potential_acoustic, mp->d_b_potential_dot_dot_acoustic, nb_field, d_xix, d_xiy, d_xiz, d_etax, d_etay, d_etaz, d_gammax, d_gammay, d_gammaz, mp->xix_regular,mp->jacobian_regular, mp->d_hprime_xx, mp->d_hprimewgll_xx, mp->d_wgllwgll_xy, mp->d_wgllwgll_xz, mp->d_wgllwgll_yz, d_rhostore, mp->use_mesh_coloring_gpu, mp->gravity, mp->d_minus_g, d_kappastore, mp->d_wgll_cube); /* // Call to reference kernel, solving a single wavefield // forward wavefields -> FORWARD_OR_ADJOINT == 1 hipLaunchKernelGGL(( Kernel_2_acoustic_impl<1>), dim3(grid),dim3(threads),0,mp->compute_stream, nb_blocks_to_compute, d_ibool, mp->d_phase_ispec_inner_acoustic, mp->num_phase_ispec_acoustic, d_iphase, mp->d_potential_acoustic, mp->d_potential_dot_dot_acoustic, d_xix, d_xiy, d_xiz, d_etax, d_etay, d_etaz, d_gammax, d_gammay, d_gammaz, mp->d_hprime_xx, mp->d_hprimewgll_xx, mp->d_wgllwgll_xy, mp->d_wgllwgll_xz, mp->d_wgllwgll_yz, d_rhostore, mp->use_mesh_coloring_gpu, mp->gravity, mp->d_minus_g, d_kappastore, mp->d_wgll_cube); */ // Cuda timing if (CUDA_TIMING ){ realw flops,time; stop_timing_cuda(&start,&stop,"Kernel_2_acoustic_impl",&time); // time in seconds time = time / 1000.; // performance if (! mp->gravity) { if (! mp->use_mesh_coloring_gpu ){ // see with: nvprof --metrics flops_sp ./xspecfem3D // -> using 322631424 FLOPS (Single) floating-point operations for 20736 elements // = 15559 FLOPS per block flops = 15559 * nb_blocks_to_compute; }else{ // coloring flops = 15559 * nb_blocks_to_compute; } }else{ // gravity flops = 15559 * nb_blocks_to_compute; } printf(" performance: %f GFlop/s\n", flops/time * 1.e-9); } #ifdef ENABLE_VERY_SLOW_ERROR_CHECKING exit_on_cuda_error("kernel Kernel_2"); #endif } /* ----------------------------------------------------------------------------------------------- */ // main compute_forces_acoustic CUDA routine /* ----------------------------------------------------------------------------------------------- */ extern "C" void FC_FUNC_(compute_forces_acoustic_cuda, COMPUTE_FORCES_ACOUSTIC_CUDA)(long* Mesh_pointer, int* iphase, int* nspec_outer_acoustic, int* nspec_inner_acoustic) { TRACE("compute_forces_acoustic_cuda"); //double start_time = get_time(); Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper int num_elements; if (*iphase == 1) num_elements = *nspec_outer_acoustic; else num_elements = *nspec_inner_acoustic; if (num_elements == 0) return; // mesh coloring if (mp->use_mesh_coloring_gpu ){ // note: array offsets require sorted arrays, such that e.g. ibool starts with elastic elements // and followed by acoustic ones. 
// acoustic elements also start with outer than inner element ordering int nb_colors,nb_blocks_to_compute; int istart; int offset,offset_nonpadded; // sets up color loop if (*iphase == 1){ // outer elements nb_colors = mp->num_colors_outer_acoustic; istart = 0; // array offsets (acoustic elements start after elastic ones) offset = mp->nspec_elastic * NGLL3_PADDED; offset_nonpadded = mp->nspec_elastic * NGLL3; }else{ // inner element colors (start after outer elements) nb_colors = mp->num_colors_outer_acoustic + mp->num_colors_inner_acoustic; istart = mp->num_colors_outer_acoustic; // array offsets (inner elements start after outer ones) offset = ( mp->nspec_elastic + (*nspec_outer_acoustic) ) * NGLL3_PADDED; offset_nonpadded = ( mp->nspec_elastic + (*nspec_outer_acoustic) ) * NGLL3; } // loops over colors for(int icolor = istart; icolor < nb_colors; icolor++){ nb_blocks_to_compute = mp->h_num_elem_colors_acoustic[icolor]; Kernel_2_acoustic(nb_blocks_to_compute,mp,*iphase, mp->d_ibool + offset, mp->d_xix + offset,mp->d_xiy + offset,mp->d_xiz + offset, mp->d_etax + offset,mp->d_etay + offset,mp->d_etaz + offset, mp->d_gammax + offset,mp->d_gammay + offset,mp->d_gammaz + offset, mp->d_rhostore + offset, mp->d_kappastore + offset_nonpadded); // for padded and aligned arrays offset += nb_blocks_to_compute * NGLL3_PADDED; // for no-aligned arrays offset_nonpadded += nb_blocks_to_compute * NGLL3; } }else{ // no mesh coloring: uses atomic updates Kernel_2_acoustic(num_elements, mp, *iphase, mp->d_ibool, mp->d_xix,mp->d_xiy,mp->d_xiz, mp->d_etax,mp->d_etay,mp->d_etaz, mp->d_gammax,mp->d_gammay,mp->d_gammaz, mp->d_rhostore, mp->d_kappastore); } } /* ----------------------------------------------------------------------------------------------- */ /* KERNEL for enforce free surface */ /* ----------------------------------------------------------------------------------------------- */ __global__ void enforce_free_surface_cuda_kernel( field_p potential_acoustic, field_p potential_dot_acoustic, field_p potential_dot_dot_acoustic, const int num_free_surface_faces, const int* free_surface_ispec, const int* free_surface_ijk, const int* d_ibool, const int* ispec_is_acoustic) { // gets spectral element face id int iface = blockIdx.x + gridDim.x*blockIdx.y; // for all faces on free surface if (iface < num_free_surface_faces ){ int ispec = free_surface_ispec[iface]-1; // checks if element is in acoustic domain if (ispec_is_acoustic[ispec] ){ // gets global point index int igll = threadIdx.x + threadIdx.y*blockDim.x; int i = free_surface_ijk[INDEX3(NDIM,NGLL2,0,igll,iface)] - 1; // (1,igll,iface) int j = free_surface_ijk[INDEX3(NDIM,NGLL2,1,igll,iface)] - 1; int k = free_surface_ijk[INDEX3(NDIM,NGLL2,2,igll,iface)] - 1; int iglob = d_ibool[INDEX4_PADDED(NGLLX,NGLLX,NGLLX,i,j,k,ispec)] - 1; // sets potentials to zero at free surface potential_acoustic[iglob] = Make_field(0.f); potential_dot_acoustic[iglob] = Make_field(0.f); potential_dot_dot_acoustic[iglob] = Make_field(0.f); } } } /* ----------------------------------------------------------------------------------------------- */ extern "C" void FC_FUNC_(acoustic_enforce_free_surf_cuda, ACOUSTIC_ENFORCE_FREE_SURF_CUDA)(long* Mesh_pointer, int* ABSORB_INSTEAD_OF_FREE_SURFACE) { TRACE("acoustic_enforce_free_surf_cuda"); Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container // checks if anything to do if (*ABSORB_INSTEAD_OF_FREE_SURFACE == 0){ // does not absorb free surface, thus we enforce the potential to be zero at surface 
// block sizes int num_blocks_x, num_blocks_y; get_blocks_xy(mp->num_free_surface_faces,&num_blocks_x,&num_blocks_y); dim3 grid(num_blocks_x,num_blocks_y,1); dim3 threads(NGLL2,1,1); // sets potentials to zero at free surface hipLaunchKernelGGL(( enforce_free_surface_cuda_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_potential_acoustic, mp->d_potential_dot_acoustic, mp->d_potential_dot_dot_acoustic, mp->num_free_surface_faces, mp->d_free_surface_ispec, mp->d_free_surface_ijk, mp->d_ibool, mp->d_ispec_is_acoustic); // for backward/reconstructed potentials if (mp->simulation_type == 3) { hipLaunchKernelGGL(( enforce_free_surface_cuda_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_b_potential_acoustic, mp->d_b_potential_dot_acoustic, mp->d_b_potential_dot_dot_acoustic, mp->num_free_surface_faces, mp->d_free_surface_ispec, mp->d_free_surface_ijk, mp->d_ibool, mp->d_ispec_is_acoustic); } } #ifdef ENABLE_VERY_SLOW_ERROR_CHECKING exit_on_cuda_error("enforce_free_surface_cuda"); #endif }
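// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the original sources): the
// roofline estimates quoted in the comments of Kernel_2_acoustic_impl can be
// reproduced with a few lines of host code. Attainable throughput is
//     min(peak_gflops, bandwidth * arithmetic_intensity)
// The example call uses the nvprof count (15559 FLOP/block), the hand-counted
// 8880 bytes/block and the Tesla K20c figures (3.52 TFlop/s, 208 GB/s); all
// numbers are taken from the kernel comments above, not measured here.
#include <cstdio>
#include <algorithm>

static void roofline_estimate(double flop_per_block, double bytes_per_block,
                              double peak_gflops, double bandwidth_gbs)
{
  double intensity  = flop_per_block / bytes_per_block;            // flop/byte
  double attainable = std::min(peak_gflops, bandwidth_gbs * intensity);
  printf("intensity %.2f flop/byte -> attainable %.1f GFlop/s (%.1f%% of peak)\n",
         intensity, attainable, 100.0 * attainable / peak_gflops);
}

// e.g. roofline_estimate(15559.0, 8880.0, 3520.0, 208.0) prints ~1.75 flop/byte
// and ~364 GFlop/s, i.e. the "nvprof-counts ~ 10% of peak" figure quoted above.
// ---------------------------------------------------------------------------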
28e8a17168e8e51eb280d3fb8fd6b9f3563b7bbc.cu
/* !===================================================================== ! ! S p e c f e m 3 D V e r s i o n 3 . 0 ! --------------------------------------- ! ! Main historical authors: Dimitri Komatitsch and Jeroen Tromp ! CNRS, France ! and Princeton University, USA ! (there are currently many more authors!) ! (c) October 2017 ! ! This program is free software; you can redistribute it and/or modify ! it under the terms of the GNU General Public License as published by ! the Free Software Foundation; either version 3 of the License, or ! (at your option) any later version. ! ! This program is distributed in the hope that it will be useful, ! but WITHOUT ANY WARRANTY; without even the implied warranty of ! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ! GNU General Public License for more details. ! ! You should have received a copy of the GNU General Public License along ! with this program; if not, write to the Free Software Foundation, Inc., ! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ! !===================================================================== */ #include "mesh_constants_cuda.h" /* ----------------------------------------------------------------------------------------------- */ #ifdef USE_TEXTURES_FIELDS realw_texture d_potential_tex; realw_texture d_potential_dot_dot_tex; //backward/reconstructed realw_texture d_b_potential_tex; realw_texture d_b_potential_dot_dot_tex; //note: texture variables are implicitly static, and cannot be passed as arguments to cuda kernels; // thus, 1) we thus use if-statements (FORWARD_OR_ADJOINT) to determine from which texture to fetch from // 2) we use templates // since if-statements are a bit slower as the variable is only known at runtime, we use option 2) // templates definitions template<int FORWARD_OR_ADJOINT> __device__ float texfetch_potential(int x); template<int FORWARD_OR_ADJOINT> __device__ float texfetch_potential_dot_dot(int x); // templates for texture fetching // FORWARD_OR_ADJOINT == 1 <- forward arrays template<> __device__ float texfetch_potential<1>(int x) { return tex1Dfetch(d_potential_tex, x); } template<> __device__ float texfetch_potential_dot_dot<1>(int x) { return tex1Dfetch(d_potential_dot_dot_tex, x); } // FORWARD_OR_ADJOINT == 3 <- backward/reconstructed arrays template<> __device__ float texfetch_potential<3>(int x) { return tex1Dfetch(d_b_potential_tex, x); } template<> __device__ float texfetch_potential_dot_dot<3>(int x) { return tex1Dfetch(d_b_potential_dot_dot_tex, x); } #endif #ifdef USE_TEXTURES_CONSTANTS extern realw_texture d_hprime_xx_tex; #endif // note on performance optimizations: // // instead of providing spezialized kernel routines (without mesh coloring, without gravity, etc.), // we only provide one "general" kernel to handle all cases. this reduces code redundancy and improves code readability. // as tradeoff, we take a little performance hit of around ~ 3% // // performance tests done: // - registers: we were trying to reduce the number of registers, as this is the main limiter for the // occupancy of the kernel. however, there is only little difference in register pressure for one "general" kernel // or multiple "spezialized" kernels. reducing registers is mainly achieved through the launch_bonds() directive. // - branching: we were trying to reduce code branches, such as the if-active check in earlier code versions. // reducing the branching helps the compiler to better optimize the executable. 
// - memory accesses: the global memory accesses are avoiding texture reads for coalescent arrays, as this is // still faster. thus we were using no __ldg() loads or __restricted__ pointer usage, // as those implicitly lead the compiler to use texture reads. // - arithmetic intensity: ratio of floating-point operations vs. memory accesses is still low for our kernels. // tests with using a loop over elements to re-use the constant arrays (like hprime, wgllwgll,..) and thus // increasing the arithmetic intensity failed because the number of registers increased as well. // this increased register pressure reduced the occupancy and slowed down the kernel performance. // - hiding memory latency: to minimize waiting times to retrieve a memory value from global memory, we put // some more calculations into the same code block before calling syncthreads(). this should help the // compiler to move independent calculations to wherever it can overlap it with memory access operations. // note, especially the if (gravity )-block locations are very sensitive // for optimal register usage and compiler optimizations // /* ----------------------------------------------------------------------------------------------- */ // KERNEL 2 - acoustic compute forces kernel /* ----------------------------------------------------------------------------------------------- */ template<int FORWARD_OR_ADJOINT> __global__ void #ifdef USE_LAUNCH_BOUNDS // adds compiler specification __launch_bounds__(NGLL3_PADDED,LAUNCH_MIN_BLOCKS_ACOUSTIC) #endif Kernel_2_acoustic_impl(const int nb_blocks_to_compute, const int* d_ibool, const int* d_irregular_element_number, const int* d_phase_ispec_inner_acoustic, const int num_phase_ispec_acoustic, const int d_iphase, field_const_p d_potential_acoustic, field_p d_potential_dot_dot_acoustic, field_const_p d_b_potential_acoustic, field_p d_b_potential_dot_dot_acoustic, const int nb_field, realw* d_xix,realw* d_xiy,realw* d_xiz, realw* d_etax,realw* d_etay,realw* d_etaz, realw* d_gammax,realw* d_gammay,realw* d_gammaz, const realw xix_regular, const realw jacobian_regular, realw_const_p d_hprime_xx, realw_const_p hprimewgll_xx, realw_const_p wgllwgll_xy,realw_const_p wgllwgll_xz,realw_const_p wgllwgll_yz, realw* d_rhostore, const int use_mesh_coloring_gpu, const int gravity, realw_const_p minus_g, realw* d_kappastore, realw_const_p wgll_cube){ // block-id == number of local element id in phase_ispec array int bx = blockIdx.y*gridDim.x+blockIdx.x; // thread-id == GLL node id // note: use only NGLL^3 = 125 active threads, plus 3 inactive/ghost threads, // because we used memory padding from NGLL^3 = 125 to 128 to get coalescent memory accesses; // to avoid execution branching and the need of registers to store an active state variable, // the thread ids are put in valid range int tx = threadIdx.x; int I,J,K; int iglob,offset; int working_element,ispec_irreg; field temp1l,temp2l,temp3l; realw xixl,xiyl,xizl,etaxl,etayl,etazl,gammaxl,gammayl,gammazl; realw jacobianl; field dpotentialdxl,dpotentialdyl,dpotentialdzl; realw fac1,fac2,fac3; realw rho_invl,kappa_invl; field sum_terms; field gravity_term; __shared__ field s_dummy_loc[2*NGLL3]; __shared__ field s_temp1[NGLL3]; __shared__ field s_temp2[NGLL3]; __shared__ field s_temp3[NGLL3]; __shared__ realw sh_hprime_xx[NGLL2]; __shared__ realw sh_hprimewgll_xx[NGLL2]; // arithmetic intensity: ratio of number-of-arithmetic-operations / number-of-bytes-accessed-on-DRAM // // hand-counts on floating-point operations: counts 
addition/subtraction/multiplication/division // no counts for operations on indices in for-loops (compiler will likely unrool loops) // // counts accesses to global memory, but no shared memory or register loads/stores // float has 4 bytes // counts: for simulations without gravity, without mesh_coloring // counts floating-point operations (FLOP) per thread // counts global memory accesses in bytes (BYTES) per block // 2 FLOP // // 0 BYTES // checks if anything to do if (bx >= nb_blocks_to_compute) return; // limits thread ids to range [0,125-1] if (tx >= NGLL3) tx = NGLL3-1; // counts: // + 1 FLOP // // + 0 BYTE // spectral-element id #ifdef USE_MESH_COLORING_GPU working_element = bx; #else //mesh coloring if (use_mesh_coloring_gpu ){ working_element = bx; }else{ // iphase-1 and working_element-1 for Fortran->C array conventions working_element = d_phase_ispec_inner_acoustic[bx + num_phase_ispec_acoustic*(d_iphase-1)]-1; } #endif // local padded index offset = working_element*NGLL3_PADDED + tx; ispec_irreg = d_irregular_element_number[working_element] -1; // global index iglob = d_ibool[offset] - 1; // counts: // + 8 FLOP // // (1 int + 2 float) * 128 threads = 1536 BYTE // loads potential values into shared memory if (threadIdx.x < NGLL3) { #ifdef USE_TEXTURES_FIELDS s_dummy_loc[tx] = texfetch_potential<FORWARD_OR_ADJOINT>(iglob); if (nb_field==2) s_dummy_loc[NGLL3+tx] = texfetch_potential<3>(iglob); #else // changing iglob indexing to match fortran row changes fast style s_dummy_loc[tx] = d_potential_acoustic[iglob]; if (nb_field==2) s_dummy_loc[NGLL3+tx] = d_b_potential_acoustic[iglob]; #endif } // counts: // + 0 FLOP // // + 1 float * 125 threads = 500 BYTE // gravity if (gravity ) kappa_invl = 1.f / d_kappastore[working_element*NGLL3 + tx]; // local index K = (tx/NGLL2); J = ((tx-K*NGLL2)/NGLLX); I = (tx-K*NGLL2-J*NGLLX); // counts: // + 8 FLOP // // + 0 BYTES // note: loads mesh values here to give compiler possibility to overlap memory fetches with some computations; // arguments defined as realw* instead of const realw* __restrict__ to avoid that the compiler // loads all memory by texture loads (arrays accesses are coalescent, thus no need for texture reads) // // calculates laplacian if (ispec_irreg >= 0){ //irregular_element int offset = ispec_irreg*NGLL3_PADDED + tx; xixl = d_xix[offset]; xiyl = d_xiy[offset]; xizl = d_xiz[offset]; etaxl = d_etax[offset]; etayl = d_etay[offset]; etazl = d_etaz[offset]; gammaxl = d_gammax[offset]; gammayl = d_gammay[offset]; gammazl = d_gammaz[offset]; jacobianl = 1.f / (xixl*(etayl*gammazl-etazl*gammayl) -xiyl*(etaxl*gammazl-etazl*gammaxl) +xizl*(etaxl*gammayl-etayl*gammaxl)); } // density (reciproc) rho_invl = 1.f / d_rhostore[offset]; // counts: // + 16 FLOP // // + 10 float * 128 threads = 5120 BYTE // loads hprime into shared memory if (tx < NGLL2) { #ifdef USE_TEXTURES_CONSTANTS sh_hprime_xx[tx] = tex1Dfetch(d_hprime_xx_tex,tx); #else sh_hprime_xx[tx] = d_hprime_xx[tx]; #endif // loads hprimewgll into shared memory sh_hprimewgll_xx[tx] = hprimewgll_xx[tx]; } // counts: // + 0 FLOP // // + 2 * 1 float * 25 threads = 200 BYTE // synchronize all the threads (one thread for each of the NGLL grid points of the // current spectral element) because we need the whole element to be ready in order // to be able to compute the matrix products along cut planes of the 3D element below __syncthreads(); // summed terms with added gll weights fac1 = wgllwgll_yz[K*NGLLX+J]; fac2 = wgllwgll_xz[K*NGLLX+I]; fac3 = wgllwgll_xy[J*NGLLX+I]; // We make a loop 
over direct and adjoint wavefields inside the GPU kernel to increase arithmetic intensity for (int k = 0 ; k < nb_field ; k++){ // computes first matrix product temp1l = Make_field(0.f); temp2l = Make_field(0.f); temp3l = Make_field(0.f); for (int l=0;l<NGLLX;l++) { //assumes that hprime_xx = hprime_yy = hprime_zz // 1. cut-plane along xi-direction temp1l += s_dummy_loc[NGLL3*k+K*NGLL2+J*NGLLX+l] * sh_hprime_xx[l*NGLLX+I]; // 2. cut-plane along eta-direction temp2l += s_dummy_loc[NGLL3*k+K*NGLL2+l*NGLLX+I] * sh_hprime_xx[l*NGLLX+J]; // 3. cut-plane along gamma-direction temp3l += s_dummy_loc[NGLL3*k+l*NGLL2+J*NGLLX+I] * sh_hprime_xx[l*NGLLX+K]; } // counts: // + NGLLX * 3 * 8 FLOP = 120 FLOP // // + 0 BYTE // compute derivatives of ux, uy and uz with respect to x, y and z // derivatives of potential if (threadIdx.x < NGLL3) { if (ispec_irreg >= 0){ //irregular_element dpotentialdxl = xixl*temp1l + etaxl*temp2l + gammaxl*temp3l; dpotentialdyl = xiyl*temp1l + etayl*temp2l + gammayl*temp3l; dpotentialdzl = xizl*temp1l + etazl*temp2l + gammazl*temp3l; // counts: // + 3 * 5 FLOP = 15 FLOP // // + 0 BYTE // form the dot product with the test vector s_temp1[tx] = jacobianl * rho_invl * (dpotentialdxl*xixl + dpotentialdyl*xiyl + dpotentialdzl*xizl); s_temp2[tx] = jacobianl * rho_invl * (dpotentialdxl*etaxl + dpotentialdyl*etayl + dpotentialdzl*etazl); s_temp3[tx] = jacobianl * rho_invl * (dpotentialdxl*gammaxl + dpotentialdyl*gammayl + dpotentialdzl*gammazl); } else{ s_temp1[tx] = jacobian_regular * rho_invl * temp1l * xix_regular * xix_regular; s_temp2[tx] = jacobian_regular * rho_invl * temp2l * xix_regular * xix_regular; s_temp3[tx] = jacobian_regular * rho_invl * temp3l * xix_regular * xix_regular; } } // pre-computes gravity sum term if (gravity ){ // uses potential definition: s = grad(chi) // // gravity term: 1/kappa grad(chi) * g // assumes that g only acts in (negative) z-direction gravity_term = minus_g[iglob] * kappa_invl * jacobianl * wgll_cube[tx] * dpotentialdzl; } // counts: // + 3 * 7 FLOP = 21 FLOP // // + 0 BYTE // synchronize all the threads (one thread for each of the NGLL grid points of the // current spectral element) because we need the whole element to be ready in order // to be able to compute the matrix products along cut planes of the 3D element below __syncthreads(); // computes second matrix product temp1l = Make_field(0.f); temp2l = Make_field(0.f); temp3l = Make_field(0.f); for (int l=0;l<NGLLX;l++) { //assumes hprimewgll_xx = hprimewgll_yy = hprimewgll_zz // 1. cut-plane along xi-direction temp1l += s_temp1[K*NGLL2+J*NGLLX+l] * sh_hprimewgll_xx[I*NGLLX+l]; // 2. cut-plane along eta-direction temp2l += s_temp2[K*NGLL2+l*NGLLX+I] * sh_hprimewgll_xx[J*NGLLX+l]; // 3. 
cut-plane along gamma-direction temp3l += s_temp3[l*NGLL2+J*NGLLX+I] * sh_hprimewgll_xx[K*NGLLX+l]; } // counts: // + NGLLX * 3 * 8 FLOP = 120 FLOP // // + 0 BYTE sum_terms = -(fac1*temp1l + fac2*temp2l + fac3*temp3l); // adds gravity contribution if (gravity) sum_terms += gravity_term; // counts: // + 3 * 2 FLOP + 6 FLOP = 12 FLOP // // + 3 float * 128 threads = 1536 BYTE __syncthreads(); // assembles potential array if (threadIdx.x < NGLL3) { #ifdef USE_MESH_COLORING_GPU // no atomic operation needed, colors don't share global points between elements #ifdef USE_TEXTURES_FIELDS if (k==0) d_potential_dot_dot_acoustic[iglob] = texfetch_potential_dot_dot<FORWARD_OR_ADJOINT>(iglob) + sum_terms; if (k==1) d_b_potential_dot_dot_acoustic[iglob] = texfetch_potential_dot_dot<3>(iglob) + sum_terms; #else if (k==0) d_potential_dot_dot_acoustic[iglob] += sum_terms; if (k==1) d_b_potential_dot_dot_acoustic[iglob] += sum_terms; #endif // USE_TEXTURES_FIELDS #else // MESH_COLORING //mesh coloring if (use_mesh_coloring_gpu ){ // no atomic operation needed, colors don't share global points between elements #ifdef USE_TEXTURES_FIELDS if (k==0) d_potential_dot_dot_acoustic[iglob] = texfetch_potential_dot_dot<FORWARD_OR_ADJOINT>(iglob) + sum_terms; if (k==1) d_b_potential_dot_dot_acoustic[iglob] = texfetch_potential_dot_dot<3>(iglob) + sum_terms; #else if (k==0) d_potential_dot_dot_acoustic[iglob] += sum_terms; if (k==1) d_b_potential_dot_dot_acoustic[iglob] += sum_terms; #endif // USE_TEXTURES_FIELDS }else{ if (k==0) atomicAdd(&d_potential_dot_dot_acoustic[iglob],sum_terms); if (k==1) atomicAdd(&d_b_potential_dot_dot_acoustic[iglob],sum_terms); } #endif // MESH_COLORING } } //loop over k (forward and adjoint wavefield) // counts: // + 1 FLOP // // + 1 float * 125 threads = 500 BYTE // ----------------- // total of: 323 FLOP per thread // ~ 128 * 323 = 41344 FLOP per block // // 8880 BYTE DRAM accesses per block // // -> arithmetic intensity: 41344 FLOP / 8880 BYTES ~ 4.66 FLOP/BYTE (hand-count) // // ----------------- // // nvprof: nvprof --metrics flops_sp ./xspecfem3D // -> 322631424 FLOPS (Single) floating-point operations for 20736 elements // -> 15559 FLOP per block // // -> arithmetic intensity: ~ 15559 / 8880 flop/byte = 1.75 flop/byte // // roofline model: Tesla K20x // --------------------------- // for a Kepler K20x card, the peak single-precision performance is about 3.95 TFlop/s. // global memory access has a bandwidth of ~ 250 GB/s. // thus there should be about 16 flop to hide a single byte memory access (3950./250. ~ 15.8 flop/byte = arithmetic intensity). // // memory bandwidth: 250 GB/s // single-precision peak performance: 3.95 TFlop/s -> corner arithmetic intensity = 3950 / 250 ~ 15.8 flop/byte // // note: // using dense matrix-matrix multiplication (SGEMM) leads to "practical" peak performance of around 2.9 TFlops. // (http://www.nvidia.com/docs/IO/122874/K20-and-K20X-application-performance-technical-brief.pdf) // // acoustic kernel has an arithmetic intensity of: hand-counts ~ 4.66 flop/byte // nvprof-counts ~ 1.75 flop/byte // // -> we can only achieve about: (hand-counts) 29% of the peak performance // (nvprof-counts) 11% of the peak performance // // i.e. 11% x theoretical peak performance ~ 440 GFlop/s. // 11% x "pratical" peak performance ~ 320 GFlop/s. // // CUDA_TIMING: we achieve about 224 GFlop/s (1 mpi process, 20736 elements) // -> that is about 8% of the "practical" peak. 
(or 70% of the theoretical arithmetic intensity) // // this might be due to the first compute code block (before first syncthreads), where // the partial arithmetic intensity is lower than for the total routine. // // roofline model: Tesla K20c (Kepler architecture: http://www.nvidia.com/content/tesla/pdf/Tesla-KSeries-Overview-LR.pdf) // --------------------------- // memory bandwidth: 208 GB/s // single-precision peak performance: 3.52 TFlop/s -> corner arithmetic intensity = 3520 / 208 ~ 16.9 flop/byte // // we can only achieve about: (hand-counts) 27% of the peak performance -> 970.6 GFlop/s // (nvprof-counts) 10% of the peak performance -> 364.5 GFlop/s - measured: 229.631 GFlop/s // // roofline model: nVidia GT 650m http://www.gpuzoo.com/GPU-NVIDIA/GeForce_GT_650M_DDR3.html // --------------------------- // memory bandwidth: 28.8 GB/s // single-precision peak performance: 625.6 GFlop/s -> corner arithmetic intensity = 625.6 / 28.8 ~ 21.7 flop/byte // // we can only achieve about: (hand-counts) 21% of the peak performance -> 132.6 GFlop/s // (nvprof-counts) 8% of the peak performance -> 50.5 GFlop/s - measured: 52.1907 GFlop/s // // // // better performance ideas and improvements are welcome :) } /* ----------------------------------------------------------------------------------------------- */ //Reference kernel, solving a single wavefield /* template<int FORWARD_OR_ADJOINT> __global__ void #ifdef USE_LAUNCH_BOUNDS // adds compiler specification __launch_bounds__(NGLL3_PADDED,LAUNCH_MIN_BLOCKS_ACOUSTIC) #endif Kernel_2_acoustic_impl(const int nb_blocks_to_compute, const int* d_ibool, const int* d_phase_ispec_inner_acoustic, const int num_phase_ispec_acoustic, const int d_iphase, realw_const_p d_potential_acoustic, realw_p d_potential_dot_dot_acoustic, realw* d_xix,realw* d_xiy,realw* d_xiz, realw* d_etax,realw* d_etay,realw* d_etaz, realw* d_gammax,realw* d_gammay,realw* d_gammaz, realw_const_p d_hprime_xx, realw_const_p hprimewgll_xx, realw_const_p wgllwgll_xy,realw_const_p wgllwgll_xz,realw_const_p wgllwgll_yz, realw* d_rhostore, const int use_mesh_coloring_gpu, const int gravity, realw_const_p minus_g, realw* d_kappastore, realw_const_p wgll_cube){ // block-id == number of local element id in phase_ispec array int bx = blockIdx.y*gridDim.x+blockIdx.x; // thread-id == GLL node id // note: use only NGLL^3 = 125 active threads, plus 3 inactive/ghost threads, // because we used memory padding from NGLL^3 = 125 to 128 to get coalescent memory accesses; // to avoid execution branching and the need of registers to store an active state variable, // the thread ids are put in valid range int tx = threadIdx.x; int I,J,K; int iglob,offset; int working_element; realw temp1l,temp2l,temp3l; realw xixl,xiyl,xizl,etaxl,etayl,etazl,gammaxl,gammayl,gammazl; realw jacobianl; realw dpotentialdxl,dpotentialdyl,dpotentialdzl; realw fac1,fac2,fac3; realw rho_invl,kappa_invl; realw sum_terms; realw gravity_term; __shared__ realw s_dummy_loc[NGLL3]; __shared__ realw s_temp1[NGLL3]; __shared__ realw s_temp2[NGLL3]; __shared__ realw s_temp3[NGLL3]; __shared__ realw sh_hprime_xx[NGLL2]; __shared__ realw sh_hprimewgll_xx[NGLL2]; // arithmetic intensity: ratio of number-of-arithmetic-operations / number-of-bytes-accessed-on-DRAM // // hand-counts on floating-point operations: counts addition/subtraction/multiplication/division // no counts for operations on indices in for-loops (compiler will likely unrool loops) // // counts accesses to global memory, but no shared memory or register loads/stores // float 
has 4 bytes // counts: for simulations without gravity, without mesh_coloring // counts floating-point operations (FLOP) per thread // counts global memory accesses in bytes (BYTES) per block // 2 FLOP // // 0 BYTES // checks if anything to do if (bx >= nb_blocks_to_compute) return; // limits thread ids to range [0,125-1] if (tx >= NGLL3) tx = NGLL3-1; // counts: // + 1 FLOP // // + 0 BYTE // spectral-element id #ifdef USE_MESH_COLORING_GPU working_element = bx; #else //mesh coloring if (use_mesh_coloring_gpu ){ working_element = bx; }else{ // iphase-1 and working_element-1 for Fortran->C array conventions working_element = d_phase_ispec_inner_acoustic[bx + num_phase_ispec_acoustic*(d_iphase-1)]-1; } #endif // local padded index offset = working_element*NGLL3_PADDED + tx; // global index iglob = d_ibool[offset] - 1; // counts: // + 7 FLOP // // + 2 float * 128 threads = 1024 BYTE // loads potential values into shared memory if (threadIdx.x < NGLL3) { #ifdef USE_TEXTURES_FIELDS s_dummy_loc[tx] = texfetch_potential<FORWARD_OR_ADJOINT>(iglob); #else // changing iglob indexing to match fortran row changes fast style s_dummy_loc[tx] = d_potential_acoustic[iglob]; #endif } // counts: // + 0 FLOP // // + 1 float * 125 threads = 500 BYTE // gravity if (gravity ){ kappa_invl = 1.f / d_kappastore[working_element*NGLL3 + tx]; } // local index K = (tx/NGLL2); J = ((tx-K*NGLL2)/NGLLX); I = (tx-K*NGLL2-J*NGLLX); // counts: // + 8 FLOP // // + 0 BYTES // note: loads mesh values here to give compiler possibility to overlap memory fetches with some computations; // arguments defined as realw* instead of const realw* __restrict__ to avoid that the compiler // loads all memory by texture loads (arrays accesses are coalescent, thus no need for texture reads) // // calculates laplacian xixl = d_xix[offset]; xiyl = d_xiy[offset]; xizl = d_xiz[offset]; etaxl = d_etax[offset]; etayl = d_etay[offset]; etazl = d_etaz[offset]; gammaxl = d_gammax[offset]; gammayl = d_gammay[offset]; gammazl = d_gammaz[offset]; jacobianl = 1.f / (xixl*(etayl*gammazl-etazl*gammayl) -xiyl*(etaxl*gammazl-etazl*gammaxl) +xizl*(etaxl*gammayl-etayl*gammaxl)); // density (reciproc) rho_invl = 1.f / d_rhostore[offset]; // counts: // + 16 FLOP // // + 10 float * 128 threads = 5120 BYTE // loads hprime into shared memory if (tx < NGLL2) { #ifdef USE_TEXTURES_CONSTANTS sh_hprime_xx[tx] = tex1Dfetch(d_hprime_xx_tex,tx); #else sh_hprime_xx[tx] = d_hprime_xx[tx]; #endif // loads hprimewgll into shared memory sh_hprimewgll_xx[tx] = hprimewgll_xx[tx]; } // counts: // + 0 FLOP // // + 2 * 1 float * 25 threads = 200 BYTE // synchronize all the threads (one thread for each of the NGLL grid points of the // current spectral element) because we need the whole element to be ready in order // to be able to compute the matrix products along cut planes of the 3D element below __syncthreads(); // computes first matrix product temp1l = 0.f; temp2l = 0.f; temp3l = 0.f; for (int l=0;l<NGLLX;l++) { //assumes that hprime_xx = hprime_yy = hprime_zz // 1. cut-plane along xi-direction temp1l += s_dummy_loc[K*NGLL2+J*NGLLX+l] * sh_hprime_xx[l*NGLLX+I]; // 2. cut-plane along eta-direction temp2l += s_dummy_loc[K*NGLL2+l*NGLLX+I] * sh_hprime_xx[l*NGLLX+J]; // 3. 
cut-plane along gamma-direction temp3l += s_dummy_loc[l*NGLL2+J*NGLLX+I] * sh_hprime_xx[l*NGLLX+K]; } // counts: // + NGLLX * 3 * 8 FLOP = 120 FLOP // // + 0 BYTE // compute derivatives of ux, uy and uz with respect to x, y and z // derivatives of potential dpotentialdxl = xixl*temp1l + etaxl*temp2l + gammaxl*temp3l; dpotentialdyl = xiyl*temp1l + etayl*temp2l + gammayl*temp3l; dpotentialdzl = xizl*temp1l + etazl*temp2l + gammazl*temp3l; // counts: // + 3 * 5 FLOP = 15 FLOP // // + 0 BYTE // form the dot product with the test vector if (threadIdx.x < NGLL3) { s_temp1[tx] = jacobianl * rho_invl * (dpotentialdxl*xixl + dpotentialdyl*xiyl + dpotentialdzl*xizl); s_temp2[tx] = jacobianl * rho_invl * (dpotentialdxl*etaxl + dpotentialdyl*etayl + dpotentialdzl*etazl); s_temp3[tx] = jacobianl * rho_invl * (dpotentialdxl*gammaxl + dpotentialdyl*gammayl + dpotentialdzl*gammazl); } // pre-computes gravity sum term if (gravity ){ // uses potential definition: s = grad(chi) // // gravity term: 1/kappa grad(chi) * g // assumes that g only acts in (negative) z-direction gravity_term = minus_g[iglob] * kappa_invl * jacobianl * wgll_cube[tx] * dpotentialdzl; } // counts: // + 3 * 7 FLOP = 21 FLOP // // + 0 BYTE // synchronize all the threads (one thread for each of the NGLL grid points of the // current spectral element) because we need the whole element to be ready in order // to be able to compute the matrix products along cut planes of the 3D element below __syncthreads(); // computes second matrix product temp1l = 0.f; temp2l = 0.f; temp3l = 0.f; for (int l=0;l<NGLLX;l++) { //assumes hprimewgll_xx = hprimewgll_yy = hprimewgll_zz // 1. cut-plane along xi-direction temp1l += s_temp1[K*NGLL2+J*NGLLX+l] * sh_hprimewgll_xx[I*NGLLX+l]; // 2. cut-plane along eta-direction temp2l += s_temp2[K*NGLL2+l*NGLLX+I] * sh_hprimewgll_xx[J*NGLLX+l]; // 3. 
cut-plane along gamma-direction temp3l += s_temp3[l*NGLL2+J*NGLLX+I] * sh_hprimewgll_xx[K*NGLLX+l]; } // counts: // + NGLLX * 3 * 8 FLOP = 120 FLOP // // + 0 BYTE // summed terms with added gll weights fac1 = wgllwgll_yz[K*NGLLX+J]; fac2 = wgllwgll_xz[K*NGLLX+I]; fac3 = wgllwgll_xy[J*NGLLX+I]; sum_terms = -(fac1*temp1l + fac2*temp2l + fac3*temp3l); // adds gravity contribution if (gravity) sum_terms += gravity_term; // counts: // + 3 * 2 FLOP + 6 FLOP = 12 FLOP // // + 3 float * 128 threads = 1536 BYTE // assembles potential array if (threadIdx.x < NGLL3) { #ifdef USE_MESH_COLORING_GPU // no atomic operation needed, colors don't share global points between elements #ifdef USE_TEXTURES_FIELDS d_potential_dot_dot_acoustic[iglob] = texfetch_potential_dot_dot<FORWARD_OR_ADJOINT>(iglob) + sum_terms; #else d_potential_dot_dot_acoustic[iglob] += sum_terms; #endif // USE_TEXTURES_FIELDS #else // MESH_COLORING //mesh coloring if (use_mesh_coloring_gpu ){ // no atomic operation needed, colors don't share global points between elements #ifdef USE_TEXTURES_FIELDS d_potential_dot_dot_acoustic[iglob] = texfetch_potential_dot_dot<FORWARD_OR_ADJOINT>(iglob) + sum_terms; #else d_potential_dot_dot_acoustic[iglob] += sum_terms; #endif // USE_TEXTURES_FIELDS }else{ atomicAdd(&d_potential_dot_dot_acoustic[iglob],sum_terms); } #endif // MESH_COLORING } // kernel useful for optimization: stripped-down version // acoustic kernel without gravity and without mesh coloring template<int FORWARD_OR_ADJOINT> __global__ void #ifdef USE_LAUNCH_BOUNDS // adds compiler specification __launch_bounds__(NGLL3_PADDED,LAUNCH_MIN_BLOCKS_ACOUSTIC) #endif Kernel_2_acoustic_perf_impl(const int nb_blocks_to_compute, const int* d_ibool, const int* d_phase_ispec_inner_acoustic, const int num_phase_ispec_acoustic, const int d_iphase, realw_const_p d_potential_acoustic, realw_p d_potential_dot_dot_acoustic, realw* d_xix,realw* d_xiy,realw* d_xiz, realw* d_etax,realw* d_etay,realw* d_etaz, realw* d_gammax,realw* d_gammay,realw* d_gammaz, realw_const_p d_hprime_xx, realw_const_p hprimewgll_xx, realw_const_p wgllwgll_xy,realw_const_p wgllwgll_xz,realw_const_p wgllwgll_yz, realw* d_rhostore, const int use_mesh_coloring_gpu, const int gravity, realw_const_p minus_g, realw* d_kappastore, realw_const_p wgll_cube){ // note: this routine is using only 12 active blocks instead of full occupancy (16 active blocks) // due to small register spilling which slows down performance // timing: ~ 1.41 ms (Kepler: Tesla K20c) // block-id == number of local element id in phase_ispec array int bx = blockIdx.y*gridDim.x+blockIdx.x; // thread-id == GLL node id // note: use only NGLL^3 = 125 active threads, plus 3 inactive/ghost threads, // because we used memory padding from NGLL^3 = 125 to 128 to get coalescent memory accesses; // to avoid execution branching and the need of registers to store an active state variable, // the thread ids are put in valid range int tx = threadIdx.x; int I,J,K; int iglob,offset; int working_element; realw temp1l,temp2l,temp3l; realw xixl,xiyl,xizl,etaxl,etayl,etazl,gammaxl,gammayl,gammazl,jacobianl; realw dpotentialdxl,dpotentialdyl,dpotentialdzl; realw fac1,fac2,fac3; realw rho_invl; realw sum_terms; __shared__ realw s_dummy_loc[NGLL3]; __shared__ realw s_temp1[NGLL3]; __shared__ realw s_temp2[NGLL3]; __shared__ realw s_temp3[NGLL3]; __shared__ realw sh_hprime_xx[NGLL2]; __shared__ realw sh_hprimewgll_xx[NGLL2]; // checks if anything to do if (bx >= nb_blocks_to_compute) return; // limits thread ids to range [0,125-1] if (tx >= 
NGLL3) tx = NGLL3 - 1; // spectral-element id // iphase-1 and working_element-1 for Fortran->C array conventions working_element = d_phase_ispec_inner_acoustic[bx + num_phase_ispec_acoustic*(d_iphase-1)] - 1; // local padded index offset = working_element*NGLL3_PADDED + tx; // global index iglob = d_ibool[offset] - 1; // loads potential values into shared memory if (threadIdx.x < NGLL3) { // loads potentials #ifdef USE_TEXTURES_FIELDS s_dummy_loc[tx] = texfetch_potential<FORWARD_OR_ADJOINT>(iglob); #else // changing iglob indexing to match fortran row changes fast style s_dummy_loc[tx] = d_potential_acoustic[iglob]; #endif } // local index K = (tx/NGLL2); J = ((tx-K*NGLL2)/NGLLX); I = (tx-K*NGLL2-J*NGLLX); // loads mesh values here to give compiler possibility to overlap memory fetches with some computations // note: arguments defined as realw* instead of const realw* __restrict__ to avoid that the compiler // loads all memory by texture loads // we only use the first loads explicitly by texture loads, all subsequent without. this should lead/trick // the compiler to use global memory loads for all the subsequent accesses. // // calculates laplacian //xixl = get_global_cr( &d_xix[offset] ); // first array with texture load xixl = d_xix[offset]; xiyl = d_xiy[offset]; // all subsequent without to avoid over-use of texture for coalescent access xizl = d_xiz[offset]; etaxl = d_etax[offset]; etayl = d_etay[offset]; etazl = d_etaz[offset]; gammaxl = d_gammax[offset]; gammayl = d_gammay[offset]; gammazl = d_gammaz[offset]; jacobianl = 1.f / (xixl*(etayl*gammazl-etazl*gammayl) -xiyl*(etaxl*gammazl-etazl*gammaxl) +xizl*(etaxl*gammayl-etayl*gammaxl)); // density (reciproc) rho_invl = 1.f / d_rhostore[offset]; // loads hprime into shared memory if (tx < NGLL2) { #ifdef USE_TEXTURES_CONSTANTS sh_hprime_xx[tx] = tex1Dfetch(d_hprime_xx_tex,tx); #else sh_hprime_xx[tx] = d_hprime_xx[tx]; #endif // loads hprimewgll into shared memory sh_hprimewgll_xx[tx] = hprimewgll_xx[tx]; } // synchronize all the threads (one thread for each of the NGLL grid points of the // current spectral element) because we need the whole element to be ready in order // to be able to compute the matrix products along cut planes of the 3D element below __syncthreads(); // computes first matrix product temp1l = 0.f; temp2l = 0.f; temp3l = 0.f; for (int l=0;l<NGLLX;l++) { //assumes that hprime_xx = hprime_yy = hprime_zz // 1. cut-plane along xi-direction temp1l += s_dummy_loc[K*NGLL2+J*NGLLX+l] * sh_hprime_xx[l*NGLLX+I]; // 2. cut-plane along eta-direction temp2l += s_dummy_loc[K*NGLL2+l*NGLLX+I] * sh_hprime_xx[l*NGLLX+J]; // 3. 
cut-plane along gamma-direction temp3l += s_dummy_loc[l*NGLL2+J*NGLLX+I] * sh_hprime_xx[l*NGLLX+K]; } // compute derivatives of ux, uy and uz with respect to x, y and z // derivatives of potential dpotentialdxl = xixl*temp1l + etaxl*temp2l + gammaxl*temp3l; dpotentialdyl = xiyl*temp1l + etayl*temp2l + gammayl*temp3l; dpotentialdzl = xizl*temp1l + etazl*temp2l + gammazl*temp3l; // form the dot product with the test vector if (threadIdx.x < NGLL3) { s_temp1[tx] = jacobianl * rho_invl * (dpotentialdxl*xixl + dpotentialdyl*xiyl + dpotentialdzl*xizl); s_temp2[tx] = jacobianl * rho_invl * (dpotentialdxl*etaxl + dpotentialdyl*etayl + dpotentialdzl*etazl); s_temp3[tx] = jacobianl * rho_invl * (dpotentialdxl*gammaxl + dpotentialdyl*gammayl + dpotentialdzl*gammazl); } // synchronize all the threads (one thread for each of the NGLL grid points of the // current spectral element) because we need the whole element to be ready in order // to be able to compute the matrix products along cut planes of the 3D element below __syncthreads(); // computes second matrix product temp1l = 0.f; temp2l = 0.f; temp3l = 0.f; for (int l=0;l<NGLLX;l++) { //assumes hprimewgll_xx = hprimewgll_yy = hprimewgll_zz // 1. cut-plane along xi-direction temp1l += s_temp1[K*NGLL2+J*NGLLX+l] * sh_hprimewgll_xx[I*NGLLX+l]; // 2. cut-plane along eta-direction temp2l += s_temp2[K*NGLL2+l*NGLLX+I] * sh_hprimewgll_xx[J*NGLLX+l]; // 3. cut-plane along gamma-direction temp3l += s_temp3[l*NGLL2+J*NGLLX+I] * sh_hprimewgll_xx[K*NGLLX+l]; } // summed terms with added gll weights fac1 = wgllwgll_yz[K*NGLLX+J]; fac2 = wgllwgll_xz[K*NGLLX+I]; fac3 = wgllwgll_xy[J*NGLLX+I]; sum_terms = -(fac1*temp1l + fac2*temp2l + fac3*temp3l); // assembles potential array if (threadIdx.x < NGLL3) { atomicAdd(&d_potential_dot_dot_acoustic[iglob],sum_terms); } } */ /* ----------------------------------------------------------------------------------------------- */ void Kernel_2_acoustic(int nb_blocks_to_compute, Mesh* mp, int d_iphase, int* d_ibool, realw* d_xix,realw* d_xiy,realw* d_xiz, realw* d_etax,realw* d_etay,realw* d_etaz, realw* d_gammax,realw* d_gammay,realw* d_gammaz, realw* d_rhostore, realw* d_kappastore){ #ifdef ENABLE_VERY_SLOW_ERROR_CHECKING exit_on_cuda_error("before acoustic kernel Kernel 2"); #endif // if the grid can handle the number of blocks, we let it be 1D // grid_2_x = nb_elem_color; // nb_elem_color is just how many blocks we are computing now int blocksize = NGLL3_PADDED; int num_blocks_x, num_blocks_y; get_blocks_xy(nb_blocks_to_compute,&num_blocks_x,&num_blocks_y); dim3 grid(num_blocks_x,num_blocks_y); dim3 threads(blocksize,1,1); // Cuda timing cudaEvent_t start, stop; if (CUDA_TIMING ){ start_timing_cuda(&start,&stop); } int nb_field = mp->simulation_type == 3 ? 
2 : 1 ; //This kernel treats both forward and adjoint wavefield within the same call, to increase performance ( ~37% faster for pure acoustic simulations ) Kernel_2_acoustic_impl<1><<<grid,threads,0,mp->compute_stream>>>(nb_blocks_to_compute, d_ibool, mp->d_irregular_element_number, mp->d_phase_ispec_inner_acoustic, mp->num_phase_ispec_acoustic, d_iphase, mp->d_potential_acoustic, mp->d_potential_dot_dot_acoustic, mp->d_b_potential_acoustic, mp->d_b_potential_dot_dot_acoustic, nb_field, d_xix, d_xiy, d_xiz, d_etax, d_etay, d_etaz, d_gammax, d_gammay, d_gammaz, mp->xix_regular,mp->jacobian_regular, mp->d_hprime_xx, mp->d_hprimewgll_xx, mp->d_wgllwgll_xy, mp->d_wgllwgll_xz, mp->d_wgllwgll_yz, d_rhostore, mp->use_mesh_coloring_gpu, mp->gravity, mp->d_minus_g, d_kappastore, mp->d_wgll_cube); /* // Call to reference kernel, solving a single wavefield // forward wavefields -> FORWARD_OR_ADJOINT == 1 Kernel_2_acoustic_impl<1><<<grid,threads,0,mp->compute_stream>>>(nb_blocks_to_compute, d_ibool, mp->d_phase_ispec_inner_acoustic, mp->num_phase_ispec_acoustic, d_iphase, mp->d_potential_acoustic, mp->d_potential_dot_dot_acoustic, d_xix, d_xiy, d_xiz, d_etax, d_etay, d_etaz, d_gammax, d_gammay, d_gammaz, mp->d_hprime_xx, mp->d_hprimewgll_xx, mp->d_wgllwgll_xy, mp->d_wgllwgll_xz, mp->d_wgllwgll_yz, d_rhostore, mp->use_mesh_coloring_gpu, mp->gravity, mp->d_minus_g, d_kappastore, mp->d_wgll_cube); */ // Cuda timing if (CUDA_TIMING ){ realw flops,time; stop_timing_cuda(&start,&stop,"Kernel_2_acoustic_impl",&time); // time in seconds time = time / 1000.; // performance if (! mp->gravity) { if (! mp->use_mesh_coloring_gpu ){ // see with: nvprof --metrics flops_sp ./xspecfem3D // -> using 322631424 FLOPS (Single) floating-point operations for 20736 elements // = 15559 FLOPS per block flops = 15559 * nb_blocks_to_compute; }else{ // coloring flops = 15559 * nb_blocks_to_compute; } }else{ // gravity flops = 15559 * nb_blocks_to_compute; } printf(" performance: %f GFlop/s\n", flops/time * 1.e-9); } #ifdef ENABLE_VERY_SLOW_ERROR_CHECKING exit_on_cuda_error("kernel Kernel_2"); #endif } /* ----------------------------------------------------------------------------------------------- */ // main compute_forces_acoustic CUDA routine /* ----------------------------------------------------------------------------------------------- */ extern "C" void FC_FUNC_(compute_forces_acoustic_cuda, COMPUTE_FORCES_ACOUSTIC_CUDA)(long* Mesh_pointer, int* iphase, int* nspec_outer_acoustic, int* nspec_inner_acoustic) { TRACE("compute_forces_acoustic_cuda"); //double start_time = get_time(); Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper int num_elements; if (*iphase == 1) num_elements = *nspec_outer_acoustic; else num_elements = *nspec_inner_acoustic; if (num_elements == 0) return; // mesh coloring if (mp->use_mesh_coloring_gpu ){ // note: array offsets require sorted arrays, such that e.g. ibool starts with elastic elements // and followed by acoustic ones. 
// acoustic elements also start with outer than inner element ordering int nb_colors,nb_blocks_to_compute; int istart; int offset,offset_nonpadded; // sets up color loop if (*iphase == 1){ // outer elements nb_colors = mp->num_colors_outer_acoustic; istart = 0; // array offsets (acoustic elements start after elastic ones) offset = mp->nspec_elastic * NGLL3_PADDED; offset_nonpadded = mp->nspec_elastic * NGLL3; }else{ // inner element colors (start after outer elements) nb_colors = mp->num_colors_outer_acoustic + mp->num_colors_inner_acoustic; istart = mp->num_colors_outer_acoustic; // array offsets (inner elements start after outer ones) offset = ( mp->nspec_elastic + (*nspec_outer_acoustic) ) * NGLL3_PADDED; offset_nonpadded = ( mp->nspec_elastic + (*nspec_outer_acoustic) ) * NGLL3; } // loops over colors for(int icolor = istart; icolor < nb_colors; icolor++){ nb_blocks_to_compute = mp->h_num_elem_colors_acoustic[icolor]; Kernel_2_acoustic(nb_blocks_to_compute,mp,*iphase, mp->d_ibool + offset, mp->d_xix + offset,mp->d_xiy + offset,mp->d_xiz + offset, mp->d_etax + offset,mp->d_etay + offset,mp->d_etaz + offset, mp->d_gammax + offset,mp->d_gammay + offset,mp->d_gammaz + offset, mp->d_rhostore + offset, mp->d_kappastore + offset_nonpadded); // for padded and aligned arrays offset += nb_blocks_to_compute * NGLL3_PADDED; // for no-aligned arrays offset_nonpadded += nb_blocks_to_compute * NGLL3; } }else{ // no mesh coloring: uses atomic updates Kernel_2_acoustic(num_elements, mp, *iphase, mp->d_ibool, mp->d_xix,mp->d_xiy,mp->d_xiz, mp->d_etax,mp->d_etay,mp->d_etaz, mp->d_gammax,mp->d_gammay,mp->d_gammaz, mp->d_rhostore, mp->d_kappastore); } } /* ----------------------------------------------------------------------------------------------- */ /* KERNEL for enforce free surface */ /* ----------------------------------------------------------------------------------------------- */ __global__ void enforce_free_surface_cuda_kernel( field_p potential_acoustic, field_p potential_dot_acoustic, field_p potential_dot_dot_acoustic, const int num_free_surface_faces, const int* free_surface_ispec, const int* free_surface_ijk, const int* d_ibool, const int* ispec_is_acoustic) { // gets spectral element face id int iface = blockIdx.x + gridDim.x*blockIdx.y; // for all faces on free surface if (iface < num_free_surface_faces ){ int ispec = free_surface_ispec[iface]-1; // checks if element is in acoustic domain if (ispec_is_acoustic[ispec] ){ // gets global point index int igll = threadIdx.x + threadIdx.y*blockDim.x; int i = free_surface_ijk[INDEX3(NDIM,NGLL2,0,igll,iface)] - 1; // (1,igll,iface) int j = free_surface_ijk[INDEX3(NDIM,NGLL2,1,igll,iface)] - 1; int k = free_surface_ijk[INDEX3(NDIM,NGLL2,2,igll,iface)] - 1; int iglob = d_ibool[INDEX4_PADDED(NGLLX,NGLLX,NGLLX,i,j,k,ispec)] - 1; // sets potentials to zero at free surface potential_acoustic[iglob] = Make_field(0.f); potential_dot_acoustic[iglob] = Make_field(0.f); potential_dot_dot_acoustic[iglob] = Make_field(0.f); } } } /* ----------------------------------------------------------------------------------------------- */ extern "C" void FC_FUNC_(acoustic_enforce_free_surf_cuda, ACOUSTIC_ENFORCE_FREE_SURF_CUDA)(long* Mesh_pointer, int* ABSORB_INSTEAD_OF_FREE_SURFACE) { TRACE("acoustic_enforce_free_surf_cuda"); Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container // checks if anything to do if (*ABSORB_INSTEAD_OF_FREE_SURFACE == 0){ // does not absorb free surface, thus we enforce the potential to be zero at surface 
// block sizes int num_blocks_x, num_blocks_y; get_blocks_xy(mp->num_free_surface_faces,&num_blocks_x,&num_blocks_y); dim3 grid(num_blocks_x,num_blocks_y,1); dim3 threads(NGLL2,1,1); // sets potentials to zero at free surface enforce_free_surface_cuda_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->d_potential_acoustic, mp->d_potential_dot_acoustic, mp->d_potential_dot_dot_acoustic, mp->num_free_surface_faces, mp->d_free_surface_ispec, mp->d_free_surface_ijk, mp->d_ibool, mp->d_ispec_is_acoustic); // for backward/reconstructed potentials if (mp->simulation_type == 3) { enforce_free_surface_cuda_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->d_b_potential_acoustic, mp->d_b_potential_dot_acoustic, mp->d_b_potential_dot_dot_acoustic, mp->num_free_surface_faces, mp->d_free_surface_ispec, mp->d_free_surface_ijk, mp->d_ibool, mp->d_ispec_is_acoustic); } } #ifdef ENABLE_VERY_SLOW_ERROR_CHECKING exit_on_cuda_error("enforce_free_surface_cuda"); #endif }
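// Illustration (not part of the solver): a stand-alone check of the roofline
// numbers quoted in the comments of Kernel_2_acoustic_impl above (Tesla K20x:
// ~3.95 TFlop/s single precision, ~250 GB/s memory bandwidth; hand-counted
// arithmetic intensity ~4.66 flop/byte, nvprof-counted ~1.75 flop/byte). In the
// memory-bound regime the attainable performance is intensity * bandwidth, which
// reproduces the ~29% and ~11% of-peak figures given in those comments.
#include <stdio.h>

int main(void) {
  const double peak_gflops = 3950.0;  // GFlop/s, single precision (K20x)
  const double bandwidth   = 250.0;   // GB/s
  const double ai_hand     = 4.66;    // flop/byte, hand count
  const double ai_nvprof   = 1.75;    // flop/byte, nvprof count

  const double att_hand   = ai_hand   * bandwidth;  // ~1165 GFlop/s
  const double att_nvprof = ai_nvprof * bandwidth;  // ~ 438 GFlop/s

  printf("hand count  : %6.0f GFlop/s attainable = %4.1f %% of peak\n",
         att_hand,   100.0 * att_hand   / peak_gflops);
  printf("nvprof count: %6.0f GFlop/s attainable = %4.1f %% of peak\n",
         att_nvprof, 100.0 * att_nvprof / peak_gflops);
  return 0;
}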
8bc3190abd3b500dfee0465b53242fa24be7d171.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>

#include "caffe/layers/sim_matrix_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"

namespace caffe {

template <typename Dtype>
__global__ void SMForward(const int n, const Dtype* in1,
    const Dtype* in2, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in1[index] * in2[index];
  }
}

template <typename Dtype>
void SimMatrixLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data1 = bottom[0]->gpu_data();
  const Dtype* bottom_data2 = bottom[1]->gpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  Dtype* temp_data = bottom[1]->mutable_gpu_diff();
  const Dtype* weight = this->blobs_[0]->gpu_data();
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K2_, K1_, (Dtype)1.,
      bottom_data1, weight, (Dtype)0., temp_data);
  const Dtype* temp_data2 = bottom[1]->gpu_diff();
  hipLaunchKernelGGL((SMForward<Dtype>), dim3(CAFFE_GET_BLOCKS(M_*K2_)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      M_*K2_, bottom_data2, temp_data2, temp_data);
  CUDA_POST_KERNEL_CHECK;
  for (int i = 0; i < M_; ++i) {
    *(top_data + i) = 0;
    for (int j = 0; j < K2_; ++j) {
      *(top_data + i) += *(bottom[1]->cpu_diff() + i*K2_ + j);
    }
  }
}

template <typename Dtype>
void SimMatrixLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  Backward_cpu(top, propagate_down, bottom);
}

INSTANTIATE_LAYER_GPU_FUNCS(SimMatrixLayer);

}  // namespace caffe
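// Illustration (toy example, not part of this layer): the only substantive change
// hipify makes to the CUDA original that follows (8bc3190abd3b500dfee0465b53242fa24be7d171.cu)
// is the kernel launch syntax. The general mapping, shown here with a hypothetical
// kernel "scale", is
//
//   CUDA: scale<<<blocks, threads, shmem_bytes, stream>>>(n, alpha, x);
//   HIP : hipLaunchKernelGGL((scale), dim3(blocks), dim3(threads),
//                            shmem_bytes, stream, n, alpha, x);
//
// i.e. grid, block, dynamic shared memory and stream become the first four macro
// arguments (0, 0 when the defaults are used, as in the SMForward launch above),
// followed by the original kernel arguments.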
8bc3190abd3b500dfee0465b53242fa24be7d171.cu
#include <cfloat>
#include <vector>

#include "caffe/layers/sim_matrix_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"

namespace caffe {

template <typename Dtype>
__global__ void SMForward(const int n, const Dtype* in1,
    const Dtype* in2, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in1[index] * in2[index];
  }
}

template <typename Dtype>
void SimMatrixLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data1 = bottom[0]->gpu_data();
  const Dtype* bottom_data2 = bottom[1]->gpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  Dtype* temp_data = bottom[1]->mutable_gpu_diff();
  const Dtype* weight = this->blobs_[0]->gpu_data();
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K2_, K1_, (Dtype)1.,
      bottom_data1, weight, (Dtype)0., temp_data);
  const Dtype* temp_data2 = bottom[1]->gpu_diff();
  SMForward<Dtype><<<CAFFE_GET_BLOCKS(M_*K2_), CAFFE_CUDA_NUM_THREADS>>>(
      M_*K2_, bottom_data2, temp_data2, temp_data);
  CUDA_POST_KERNEL_CHECK;
  for (int i = 0; i < M_; ++i) {
    *(top_data + i) = 0;
    for (int j = 0; j < K2_; ++j) {
      *(top_data + i) += *(bottom[1]->cpu_diff() + i*K2_ + j);
    }
  }
}

template <typename Dtype>
void SimMatrixLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  Backward_cpu(top, propagate_down, bottom);
}

INSTANTIATE_LAYER_GPU_FUNCS(SimMatrixLayer);

}  // namespace caffe
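// Illustration (reference sketch, not Caffe code): what Forward_gpu above computes.
// With proj = bottom[0] (M_ x K1_) * weight (K1_ x K2_), the layer outputs
// top[i] = sum_j bottom[1][i][j] * proj[i][j], i.e. a per-row dot product between
// bottom[1] and the projected bottom[0]; bottom[1]'s diff buffer is only reused as
// scratch space for proj and the elementwise product.
#include <vector>

template <typename Dtype>
void sim_matrix_forward_reference(int M, int K1, int K2,
                                  const std::vector<Dtype>& bottom0,  // M x K1
                                  const std::vector<Dtype>& bottom1,  // M x K2
                                  const std::vector<Dtype>& weight,   // K1 x K2
                                  std::vector<Dtype>* top) {          // M
  top->assign(M, Dtype(0));
  for (int i = 0; i < M; ++i) {
    for (int j = 0; j < K2; ++j) {
      Dtype proj = 0;
      for (int k = 0; k < K1; ++k)
        proj += bottom0[i * K1 + k] * weight[k * K2 + j];
      (*top)[i] += bottom1[i * K2 + j] * proj;
    }
  }
}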
905946f2b6224714731790a3bc3ca9db86c235c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "./dropblock-gpu-inl.h" namespace mxnet { namespace op { #define CUDA_1D_KERNEL_LOOP(i, n) \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) using namespace mshadow::cuda; // The maximum number of blocks to use in the default kernel call. constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096; /** * @brief Compute the number of blocks needed to run N threads. */ inline int ROI_GET_BLOCKS(const int N) { return ::max( ::min( (N + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock, ROI_MAXIMUM_NUM_BLOCKS), // Use at least 1 block, since CUDA does not allow empty block 1); } template <typename T> __global__ void DropblockForwardKernel( const int nthreads, const T* input_data, T* output_data, T* output_mask, const int* dev_mask ) { CUDA_1D_KERNEL_LOOP(index,nthreads){ //output_mask[index] = dev_mask[index] * (1.0f / pkeep); output_mask[index] = dev_mask[index] ; output_data[index] = output_mask[index] * input_data[index]; } } template<typename T> __global__ void DropblockBackwardKernel( const int N,//gdata.Size() T* gdata, const T* grad, const T* mask ){ CUDA_1D_KERNEL_LOOP(index,N){ gdata[index]=grad[index]*mask[index]; } } template<typename xpu> void DropblockForwardCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType>& req, const std::vector<TBlob> &outputs){ using namespace mshadow; using namespace mshadow::expr; const GPUDropblockParam param =nnvm::get<GPUDropblockParam>(attrs.parsed); if (req[gpudropblock::kOut]!=kNullOp) { CHECK_EQ(inputs.size(),1U); if(ctx.is_train) { CHECK_EQ(outputs.size(),2U); } Stream<gpu> *s=ctx.get_stream<gpu>(); const int count=inputs[gpudropblock::kData].Size(); const int num_batches=inputs[gpudropblock::kData].shape_[0]; const int channels=inputs[gpudropblock::kData].shape_[1]; const int height=inputs[gpudropblock::kData].shape_[2]; const int width=inputs[gpudropblock::kData].shape_[3]; const TBlob &out=outputs[gpudropblock::kOut]; hipStream_t stream = mshadow::Stream<gpu>::GetStream(s); if(ctx.is_train||(param.mode==gpudropblock::kAlways)) { real_t pkeep=param.p; static int iteration=0; static float p_current=1.0; if (p_current>pkeep) { ++iteration; p_current-=((p_current-pkeep)/5000.0)*iteration; } else{ p_current=pkeep; } const int blocksize=param.block_size; index_t feat_size=height; double gamma = ((1 - p_current) / (blocksize * blocksize)) * ((feat_size * feat_size) /((feat_size - blocksize + 1) * (feat_size - blocksize + 1))); index_t mask_reduction=blocksize/2; index_t mask_height,mask_width; if ((blocksize % 2) != 0) { mask_height = height - mask_reduction * 2; mask_width = width - mask_reduction * 2; } else { mask_height = height - mask_reduction * 2 + 1; mask_width = width - mask_reduction * 2 + 1; } index_t mask_area = mask_height * mask_width; index_t random_points_num = static_cast<int>(mask_area * gamma); //np.arange() std::vector<int> a; for (int i = 0; i < mask_area; ++i) { a.push_back(i); } std::vector<std::vector<std::vector<int>>> mask(num_batches, std::vector<std::vector<int >>(1,std::vector<int>(mask_area, 0))); //random.sample(a,n) for(int i=0;i<random_points_num;) { index_t randnum=rand()%mask_area; if(a[randnum]!=-100) { a[randnum]=-100; ++i; } } for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < mask_area; ++j) { if (a[j] == -100) { mask[i][0][j] = 1; } } } std::vector<std::vector<std::vector<std::vector<int>>>> 
mask_new(num_batches, std::vector<std::vector<std::vector<int>>>( 1, std::vector<std::vector<int>>( mask_height, std::vector<int>( mask_width)))); // mask=mask.reshape([data.shape[0], 1, mask_height, mask_width]) for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < mask_area; ++k) { index_t mask_i = k / mask_width; index_t mask_j = k % mask_width; mask_new[i][j][mask_i][mask_j] = mask[i][j][k]; } } } //weight_mat std::vector<std::vector<std::vector<std::vector<int>>>> weight_mat(num_batches, std::vector<std::vector<std::vector<int>>>( 1, std::vector<std::vector<int>>( blocksize, std::vector<int>( blocksize, 1)))); //padding //block_sizepadding index_t padding=0; if(blocksize==3) { padding=blocksize/2 +1; } else if (blocksize==5) { padding=ceil(blocksize/2.0)+1; } else if(blocksize>5) { padding=ceil(blocksize/2.0)+2; } index_t padding_height = mask_height + 2 * padding; index_t padding_width = mask_width + 2 * padding; std::vector<std::vector<std::vector<std::vector<int>>>> mask_padding(num_batches, std::vector<std::vector<std::vector<int>>>( 1, std::vector<std::vector<int>>( padding_height, std::vector<int>( padding_width)))); for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < padding_height; ++k) { for (int l = 0; l < padding_width; ++l) { if (k < padding || l < padding) { mask_padding[i][j][k][l] = 0; } else if (k > (mask_height + 1) || l > (mask_width + 1)) { mask_padding[i][j][k][l] = 0; } else { mask_padding[i][j][k][l] = mask_new[i][j][k - padding][l - padding]; } } } } } std::vector<std::vector<std::vector<int >>> mask_3d(num_batches, std::vector<std::vector<int >>(1, std::vector<int >( padding_height * padding_width))); for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < padding_height; ++k) { for (int l = 0; l < padding_width; ++l) { mask_3d[i][j][l + k * padding_width] = mask_padding[i][j][k][l]; } } } } //weightmat std::vector<std::vector<std::vector<int>>> kernel_3d(num_batches, std::vector<std::vector<int>>(1, std::vector<int>( blocksize * blocksize))); for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < blocksize; ++k) { for (int l = 0; l < blocksize; ++l) { kernel_3d[i][j][l + k * blocksize] = weight_mat[i][j][k][l]; } } } } // index_t outm = padding_height - blocksize + 1; // index_t convAw = blocksize * blocksize; index_t convAh = padding_height * padding_width; // std::vector<std::vector<std::vector<int>>> A_convert(num_batches, std::vector<std::vector<int>>(1, std::vector<int>( convAh * convAw))); for (int n = 0; n < num_batches; ++n) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < outm; ++k) { for (int m = 0; m < outm; ++m) { index_t wh = k * outm * convAw + m * convAw;//k*9*9+m*121 index_t col1 = k * padding_height + m;//k*11+m 0 index_t col2 = (k + 1) * padding_height + m;//(k+1)*11+m 11 index_t col3 = (k + 2) * padding_height + m;//(k+2)*11+m 22 index_t col4 = (k + 3) * padding_height + m;//(k+3)*11+m index_t col5 = (k + 4) * padding_height + m;//(k+4)*11+m index_t col6 = (k + 5) * padding_height + m; index_t col7 = (k + 6) * padding_height + m; if (blocksize == 3) { A_convert[n][j][wh] = mask_3d[n][j][col1]; A_convert[n][j][wh + 1] = mask_3d[n][j][col1 + 1]; A_convert[n][j][wh + 2] = mask_3d[n][j][col1 + 2]; A_convert[n][j][wh + 3] = mask_3d[n][j][col2]; A_convert[n][j][wh + 4] = mask_3d[n][j][col2 + 1]; A_convert[n][j][wh + 5] = mask_3d[n][j][col2 + 2]; A_convert[n][j][wh + 6] = mask_3d[n][j][col3]; 
A_convert[n][j][wh + 7] = mask_3d[n][j][col3 + 1]; A_convert[n][j][wh + 8] = mask_3d[n][j][col3 + 2]; } else if (blocksize == 5) { A_convert[n][j][wh] = mask_3d[n][j][col1]; A_convert[n][j][wh + 1] = mask_3d[n][j][col1 + 1]; A_convert[n][j][wh + 2] = mask_3d[n][j][col1 + 2]; A_convert[n][j][wh + 3] = mask_3d[n][j][col1 + 3]; A_convert[n][j][wh + 4] = mask_3d[n][j][col1 + 4]; A_convert[n][j][wh + 5] = mask_3d[n][j][col2]; A_convert[n][j][wh + 6] = mask_3d[n][j][col2 + 1]; A_convert[n][j][wh + 7] = mask_3d[n][j][col2 + 2]; A_convert[n][j][wh + 8] = mask_3d[n][j][col2 + 3]; A_convert[n][j][wh + 9] = mask_3d[n][j][col2 + 4]; A_convert[n][j][wh + 10] = mask_3d[n][j][col3]; A_convert[n][j][wh + 11] = mask_3d[n][j][col3 + 1]; A_convert[n][j][wh + 12] = mask_3d[n][j][col3 + 2]; A_convert[n][j][wh + 13] = mask_3d[n][j][col3 + 3]; A_convert[n][j][wh + 14] = mask_3d[n][j][col3 + 4]; A_convert[n][j][wh + 15] = mask_3d[n][j][col4]; A_convert[n][j][wh + 16] = mask_3d[n][j][col4 + 1]; A_convert[n][j][wh + 17] = mask_3d[n][j][col4 + 2]; A_convert[n][j][wh + 18] = mask_3d[n][j][col4 + 3]; A_convert[n][j][wh + 19] = mask_3d[n][j][col4 + 4]; A_convert[n][j][wh + 20] = mask_3d[n][j][col5]; A_convert[n][j][wh + 21] = mask_3d[n][j][col5 + 1]; A_convert[n][j][wh + 22] = mask_3d[n][j][col5 + 2]; A_convert[n][j][wh + 23] = mask_3d[n][j][col5 + 3]; A_convert[n][j][wh + 24] = mask_3d[n][j][col5 + 4]; }else if (blocksize == 7) { A_convert[n][j][wh] = mask_3d[n][j][col1]; A_convert[n][j][wh + 1] = mask_3d[n][j][col1 + 1]; A_convert[n][j][wh + 2] = mask_3d[n][j][col1 + 2]; A_convert[n][j][wh + 3] = mask_3d[n][j][col1 + 3]; A_convert[n][j][wh + 4] = mask_3d[n][j][col1 + 4]; A_convert[n][j][wh + 5] = mask_3d[n][j][col1 + 5]; A_convert[n][j][wh + 6] = mask_3d[n][j][col1 + 6]; A_convert[n][j][wh + 7] = mask_3d[n][j][col2]; A_convert[n][j][wh + 8] = mask_3d[n][j][col2 + 1]; A_convert[n][j][wh + 9] = mask_3d[n][j][col2 + 2]; A_convert[n][j][wh + 10] = mask_3d[n][j][col2 + 3]; A_convert[n][j][wh + 11] = mask_3d[n][j][col2 + 4]; A_convert[n][j][wh + 12] = mask_3d[n][j][col2 + 5]; A_convert[n][j][wh + 13] = mask_3d[n][j][col2 + 6]; A_convert[n][j][wh + 14] = mask_3d[n][j][col3]; A_convert[n][j][wh + 15] = mask_3d[n][j][col3 + 1]; A_convert[n][j][wh + 16] = mask_3d[n][j][col3 + 2]; A_convert[n][j][wh + 17] = mask_3d[n][j][col3 + 3]; A_convert[n][j][wh + 18] = mask_3d[n][j][col3 + 4]; A_convert[n][j][wh + 19] = mask_3d[n][j][col3 + 5]; A_convert[n][j][wh + 20] = mask_3d[n][j][col3 + 6]; A_convert[n][j][wh + 21] = mask_3d[n][j][col4]; A_convert[n][j][wh + 22] = mask_3d[n][j][col4 + 1]; A_convert[n][j][wh + 23] = mask_3d[n][j][col4 + 2]; A_convert[n][j][wh + 24] = mask_3d[n][j][col4 + 3]; A_convert[n][j][wh + 25] = mask_3d[n][j][col4 + 4]; A_convert[n][j][wh + 26] = mask_3d[n][j][col4 + 5]; A_convert[n][j][wh + 27] = mask_3d[n][j][col4 + 6]; A_convert[n][j][wh + 28] = mask_3d[n][j][col5]; A_convert[n][j][wh + 29] = mask_3d[n][j][col5 + 1]; A_convert[n][j][wh + 30] = mask_3d[n][j][col5 + 2]; A_convert[n][j][wh + 31] = mask_3d[n][j][col5 + 3]; A_convert[n][j][wh + 32] = mask_3d[n][j][col5 + 4]; A_convert[n][j][wh + 33] = mask_3d[n][j][col5 + 5]; A_convert[n][j][wh + 34] = mask_3d[n][j][col5 + 6]; A_convert[n][j][wh + 35] = mask_3d[n][j][col6]; A_convert[n][j][wh + 36] = mask_3d[n][j][col6 + 1]; A_convert[n][j][wh + 37] = mask_3d[n][j][col6 + 2]; A_convert[n][j][wh + 38] = mask_3d[n][j][col6 + 3]; A_convert[n][j][wh + 39] = mask_3d[n][j][col6 + 4]; A_convert[n][j][wh + 40] = mask_3d[n][j][col6 + 5]; A_convert[n][j][wh + 41] = 
mask_3d[n][j][col6 + 6]; A_convert[n][j][wh + 42] = mask_3d[n][j][col7]; A_convert[n][j][wh + 43] = mask_3d[n][j][col7 + 1]; A_convert[n][j][wh + 44] = mask_3d[n][j][col7 + 2]; A_convert[n][j][wh + 45] = mask_3d[n][j][col7 + 3]; A_convert[n][j][wh + 46] = mask_3d[n][j][col7 + 4]; A_convert[n][j][wh + 47] = mask_3d[n][j][col7 + 5]; A_convert[n][j][wh + 48] = mask_3d[n][j][col7 + 6]; } } } } } std::vector<int> conv_cache;// for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < outm; ++k) { for (int l = 0; l < outm; ++l) { int result_one_position = 0; index_t wh = k * outm * convAw + l * convAw; for (int m = 0; m < convAw; ++m) { result_one_position += A_convert[i][j][wh + m] * kernel_3d[i][j][m]; } conv_cache.push_back(result_one_position); } } } } //4 std::vector<std::vector<std::vector<std::vector<int>>>> mask_conved(num_batches, std::vector<std::vector<std::vector<int>>>( 1, std::vector<std::vector<int>>( outm, std::vector<int>( outm)))); index_t delta = blocksize / 2; index_t input_height = mask_height + delta * 2; index_t input_width = mask_width + delta * 2; index_t height_to_crop = outm - input_height; index_t width_to_crop = outm - input_width; if (height_to_crop != 0) { for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < outm - height_to_crop + 1; ++k) { for (int l = 0; l < outm; ++l) { mask_conved[i][j][k][l] = (conv_cache[i * outm * (outm - height_to_crop) + j * outm * (outm - height_to_crop) + k * outm + l]==0)? 1:0; } } } } } if (width_to_crop != 0) { for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < outm; ++k) { for (int l = 0; l < outm - width_to_crop + 1; ++l) { mask_conved[i][j][k][l] =( conv_cache[i * outm * (outm - width_to_crop) + j * outm * (outm - width_to_crop) + k * (outm - width_to_crop) + l]==0)? 1:0; } } } } } if ((width_to_crop != 0)&&(height_to_crop!=0)) { for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < outm-height_to_crop+1; ++k) { for (int l = 0; l < outm - width_to_crop + 1; ++l) { mask_conved[i][j][k][l] =( conv_cache[i * (outm-height_to_crop) * (outm - width_to_crop) + j * (outm-height_to_crop) * (outm - width_to_crop) + k * (outm - width_to_crop) + l]==0)? 1:0; } } } } } for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < outm; ++k) { for (int l = 0; l < outm; ++l) { mask_conved[i][j][k][l] =(conv_cache[i * outm * outm + j * outm * outm + k * outm + l]==0)? 
1:0; } } } } //mask_conved1Dindata int mask_conved_1d[count]; int *dev_mask; for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < channels; ++j) { for (int k = 0; k < height; ++k) { for (int l = 0; l < width; ++l) { mask_conved_1d[i * channels * height * width + j * height * width + k * width + l] = mask_conved[i][0][k][l]; } } } } //allocate memory on GPU hipMalloc((void**)&dev_mask,count* sizeof(int)); hipMemcpy(dev_mask,mask_conved_1d,count* sizeof(int),hipMemcpyHostToDevice); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_,DType,{ const DType *input_data=inputs[gpudropblock::kData].dptr<DType>(); DType *mask_out=outputs[gpudropblock::kMask].dptr<DType>(); DType *dropblock_out=outputs[gpudropblock::kOut].dptr<DType>(); hipLaunchKernelGGL(( DropblockForwardKernel<DType>), dim3(ROI_GET_BLOCKS(count)),dim3(kMaxThreadsPerBlock),0,stream, count,input_data,dropblock_out,mask_out,dev_mask ); }) hipFree (dev_mask); } else{ const TBlob& data = inputs[gpudropblock::kData]; if (req[gpudropblock::kOut] == kWriteTo) { mxnet_op::copy(s, out, data); } else { MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_,DType,{MXNET_ASSIGN_REQ_SWITCH(req[gpudropblock::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), data.dptr<DType>());//identity:input==output }); }) } } } } template <typename xpu> void DropblockBackwardCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs){ CHECK_EQ(inputs.size(),2U); CHECK_EQ(outputs.size(),1); CHECK_EQ(req.size(),1); using namespace mshadow; using namespace mshadow::expr; std::vector<TBlob> out_grads(2); std::vector<TBlob> out_data(2); out_grads[gpudropblock::kOut]=inputs[0]; out_data[gpudropblock::kMask]=inputs[1]; Stream<gpu> *s=ctx.get_stream<gpu>(); hipStream_t stream = mshadow::Stream<gpu>::GetStream(s); const GPUDropblockParam param =nnvm::get<GPUDropblockParam>(attrs.parsed); if(ctx.is_train||param.mode==gpudropblock::kAlways) { const TBlob &gdata=outputs[gpudropblock::kData]; const TBlob &grad=out_grads[gpudropblock::kOut]; const TBlob &mask=out_data[gpudropblock::kMask]; const int count=inputs[gpudropblock::kData].Size(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_,DType,{ hipLaunchKernelGGL(( DropblockBackwardKernel<DType>) , dim3(ROI_GET_BLOCKS(count)), dim3(kMaxThreadsPerBlock), 0, stream, count, gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>() ); }) }else{ const TBlob& gdata = outputs[gpudropblock::kData]; const TBlob& grad = out_grads[gpudropblock::kOut]; if (req[gpudropblock::kData] == kWriteTo) { mxnet_op::copy(s, gdata, grad); } else { MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_,DType,{MXNET_ASSIGN_REQ_SWITCH(req[gpudropblock::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>()); });}) } } } NNVM_REGISTER_OP(GPUDropblock) .set_attr<FCompute>("FCompute<gpu>", DropblockForwardCompute<gpu>); NNVM_REGISTER_OP(_backward_GPUDropblock) .set_attr<FCompute>("FCompute<gpu>", DropblockBackwardCompute<gpu>); } // namespace op } // namespace mxnet
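// Illustration (example values, not taken from this file): a numeric sanity check
// of the gamma formula used in DropblockForwardCompute above, i.e. the DropBlock
// drop rate for mask centers. For an odd block size the valid-center mask has side
// feat_size - block_size + 1, so the expected number of sampled centers equals
// gamma * mask_area, which is what random_points_num truncates to an int.
#include <stdio.h>

int main(void) {
  const double p_keep     = 0.9;  // example keep probability (param.p)
  const int    block_size = 7;    // example block size
  const int    feat_size  = 14;   // example square feature-map side

  const int    mask_side = feat_size - block_size + 1;
  const double gamma = ((1.0 - p_keep) / (double)(block_size * block_size))
                     * ((double)(feat_size * feat_size)
                        / (double)(mask_side * mask_side));

  printf("gamma = %.5f, expected sampled centers = %.2f of %d positions\n",
         gamma, gamma * mask_side * mask_side, mask_side * mask_side);
  return 0;
}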
905946f2b6224714731790a3bc3ca9db86c235c9.cu
#include "./dropblock-gpu-inl.h" namespace mxnet { namespace op { #define CUDA_1D_KERNEL_LOOP(i, n) \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) using namespace mshadow::cuda; // The maximum number of blocks to use in the default kernel call. constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096; /** * @brief Compute the number of blocks needed to run N threads. */ inline int ROI_GET_BLOCKS(const int N) { return std::max( std::min( (N + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock, ROI_MAXIMUM_NUM_BLOCKS), // Use at least 1 block, since CUDA does not allow empty block 1); } template <typename T> __global__ void DropblockForwardKernel( const int nthreads, const T* input_data, T* output_data, T* output_mask, const int* dev_mask ) { CUDA_1D_KERNEL_LOOP(index,nthreads){ //output_mask[index] = dev_mask[index] * (1.0f / pkeep); output_mask[index] = dev_mask[index] ; output_data[index] = output_mask[index] * input_data[index]; } } template<typename T> __global__ void DropblockBackwardKernel( const int N,//gdata.Size() T* gdata, const T* grad, const T* mask ){ CUDA_1D_KERNEL_LOOP(index,N){ gdata[index]=grad[index]*mask[index]; } } template<typename xpu> void DropblockForwardCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType>& req, const std::vector<TBlob> &outputs){ using namespace mshadow; using namespace mshadow::expr; const GPUDropblockParam param =nnvm::get<GPUDropblockParam>(attrs.parsed); if (req[gpudropblock::kOut]!=kNullOp) { CHECK_EQ(inputs.size(),1U); if(ctx.is_train) { CHECK_EQ(outputs.size(),2U); } Stream<gpu> *s=ctx.get_stream<gpu>(); const int count=inputs[gpudropblock::kData].Size(); const int num_batches=inputs[gpudropblock::kData].shape_[0]; const int channels=inputs[gpudropblock::kData].shape_[1]; const int height=inputs[gpudropblock::kData].shape_[2]; const int width=inputs[gpudropblock::kData].shape_[3]; const TBlob &out=outputs[gpudropblock::kOut]; cudaStream_t stream = mshadow::Stream<gpu>::GetStream(s); if(ctx.is_train||(param.mode==gpudropblock::kAlways)) { real_t pkeep=param.p; static int iteration=0; static float p_current=1.0; if (p_current>pkeep) { ++iteration; p_current-=((p_current-pkeep)/5000.0)*iteration; } else{ p_current=pkeep; } const int blocksize=param.block_size; index_t feat_size=height; double gamma = ((1 - p_current) / (blocksize * blocksize)) * ((feat_size * feat_size) /((feat_size - blocksize + 1) * (feat_size - blocksize + 1))); index_t mask_reduction=blocksize/2; index_t mask_height,mask_width; if ((blocksize % 2) != 0) { mask_height = height - mask_reduction * 2; mask_width = width - mask_reduction * 2; } else { mask_height = height - mask_reduction * 2 + 1; mask_width = width - mask_reduction * 2 + 1; } index_t mask_area = mask_height * mask_width; index_t random_points_num = static_cast<int>(mask_area * gamma); //实现np.arange()操作 std::vector<int> a; for (int i = 0; i < mask_area; ++i) { a.push_back(i); } std::vector<std::vector<std::vector<int>>> mask(num_batches, std::vector<std::vector<int >>(1,std::vector<int>(mask_area, 0))); //实现random.sample(a,n)的操作 for(int i=0;i<random_points_num;) { index_t randnum=rand()%mask_area; if(a[randnum]!=-100) { a[randnum]=-100; ++i; } } for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < mask_area; ++j) { if (a[j] == -100) { mask[i][0][j] = 1; } } } std::vector<std::vector<std::vector<std::vector<int>>>> mask_new(num_batches, std::vector<std::vector<std::vector<int>>>( 1, 
std::vector<std::vector<int>>( mask_height, std::vector<int>( mask_width)))); //对应 mask=mask.reshape([data.shape[0], 1, mask_height, mask_width]) for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < mask_area; ++k) { index_t mask_i = k / mask_width; index_t mask_j = k % mask_width; mask_new[i][j][mask_i][mask_j] = mask[i][j][k]; } } } //生成卷积所使用的weight_mat std::vector<std::vector<std::vector<std::vector<int>>>> weight_mat(num_batches, std::vector<std::vector<std::vector<int>>>( 1, std::vector<std::vector<int>>( blocksize, std::vector<int>( blocksize, 1)))); //卷积前的padding操作 //根据block_size的不同选择不同的padding策略 index_t padding=0; if(blocksize==3) { padding=blocksize/2 +1; } else if (blocksize==5) { padding=ceil(blocksize/2.0)+1; } else if(blocksize>5) { padding=ceil(blocksize/2.0)+2; } index_t padding_height = mask_height + 2 * padding; index_t padding_width = mask_width + 2 * padding; std::vector<std::vector<std::vector<std::vector<int>>>> mask_padding(num_batches, std::vector<std::vector<std::vector<int>>>( 1, std::vector<std::vector<int>>( padding_height, std::vector<int>( padding_width)))); for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < padding_height; ++k) { for (int l = 0; l < padding_width; ++l) { if (k < padding || l < padding) { mask_padding[i][j][k][l] = 0; } else if (k > (mask_height + 1) || l > (mask_width + 1)) { mask_padding[i][j][k][l] = 0; } else { mask_padding[i][j][k][l] = mask_new[i][j][k - padding][l - padding]; } } } } } std::vector<std::vector<std::vector<int >>> mask_3d(num_batches, std::vector<std::vector<int >>(1, std::vector<int >( padding_height * padding_width))); for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < padding_height; ++k) { for (int l = 0; l < padding_width; ++l) { mask_3d[i][j][l + k * padding_width] = mask_padding[i][j][k][l]; } } } } //把weightmat平铺为三维数组 std::vector<std::vector<std::vector<int>>> kernel_3d(num_batches, std::vector<std::vector<int>>(1, std::vector<int>( blocksize * blocksize))); for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < blocksize; ++k) { for (int l = 0; l < blocksize; ++l) { kernel_3d[i][j][l + k * blocksize] = weight_mat[i][j][k][l]; } } } } //计算卷积输出矩阵的维数 index_t outm = padding_height - blocksize + 1; //计算卷积过程中的被卷积矩阵的宽和高 index_t convAw = blocksize * blocksize; index_t convAh = padding_height * padding_width; //定义一个卷积过程中的矩阵 std::vector<std::vector<std::vector<int>>> A_convert(num_batches, std::vector<std::vector<int>>(1, std::vector<int>( convAh * convAw))); for (int n = 0; n < num_batches; ++n) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < outm; ++k) { for (int m = 0; m < outm; ++m) { index_t wh = k * outm * convAw + m * convAw;//k*9*9+m*121 index_t col1 = k * padding_height + m;//k*11+m 0 index_t col2 = (k + 1) * padding_height + m;//(k+1)*11+m 11 index_t col3 = (k + 2) * padding_height + m;//(k+2)*11+m 22 index_t col4 = (k + 3) * padding_height + m;//(k+3)*11+m index_t col5 = (k + 4) * padding_height + m;//(k+4)*11+m index_t col6 = (k + 5) * padding_height + m; index_t col7 = (k + 6) * padding_height + m; if (blocksize == 3) { A_convert[n][j][wh] = mask_3d[n][j][col1]; A_convert[n][j][wh + 1] = mask_3d[n][j][col1 + 1]; A_convert[n][j][wh + 2] = mask_3d[n][j][col1 + 2]; A_convert[n][j][wh + 3] = mask_3d[n][j][col2]; A_convert[n][j][wh + 4] = mask_3d[n][j][col2 + 1]; A_convert[n][j][wh + 5] = mask_3d[n][j][col2 + 2]; A_convert[n][j][wh + 6] = mask_3d[n][j][col3]; 
A_convert[n][j][wh + 7] = mask_3d[n][j][col3 + 1]; A_convert[n][j][wh + 8] = mask_3d[n][j][col3 + 2]; } else if (blocksize == 5) { A_convert[n][j][wh] = mask_3d[n][j][col1]; A_convert[n][j][wh + 1] = mask_3d[n][j][col1 + 1]; A_convert[n][j][wh + 2] = mask_3d[n][j][col1 + 2]; A_convert[n][j][wh + 3] = mask_3d[n][j][col1 + 3]; A_convert[n][j][wh + 4] = mask_3d[n][j][col1 + 4]; A_convert[n][j][wh + 5] = mask_3d[n][j][col2]; A_convert[n][j][wh + 6] = mask_3d[n][j][col2 + 1]; A_convert[n][j][wh + 7] = mask_3d[n][j][col2 + 2]; A_convert[n][j][wh + 8] = mask_3d[n][j][col2 + 3]; A_convert[n][j][wh + 9] = mask_3d[n][j][col2 + 4]; A_convert[n][j][wh + 10] = mask_3d[n][j][col3]; A_convert[n][j][wh + 11] = mask_3d[n][j][col3 + 1]; A_convert[n][j][wh + 12] = mask_3d[n][j][col3 + 2]; A_convert[n][j][wh + 13] = mask_3d[n][j][col3 + 3]; A_convert[n][j][wh + 14] = mask_3d[n][j][col3 + 4]; A_convert[n][j][wh + 15] = mask_3d[n][j][col4]; A_convert[n][j][wh + 16] = mask_3d[n][j][col4 + 1]; A_convert[n][j][wh + 17] = mask_3d[n][j][col4 + 2]; A_convert[n][j][wh + 18] = mask_3d[n][j][col4 + 3]; A_convert[n][j][wh + 19] = mask_3d[n][j][col4 + 4]; A_convert[n][j][wh + 20] = mask_3d[n][j][col5]; A_convert[n][j][wh + 21] = mask_3d[n][j][col5 + 1]; A_convert[n][j][wh + 22] = mask_3d[n][j][col5 + 2]; A_convert[n][j][wh + 23] = mask_3d[n][j][col5 + 3]; A_convert[n][j][wh + 24] = mask_3d[n][j][col5 + 4]; }else if (blocksize == 7) { A_convert[n][j][wh] = mask_3d[n][j][col1]; A_convert[n][j][wh + 1] = mask_3d[n][j][col1 + 1]; A_convert[n][j][wh + 2] = mask_3d[n][j][col1 + 2]; A_convert[n][j][wh + 3] = mask_3d[n][j][col1 + 3]; A_convert[n][j][wh + 4] = mask_3d[n][j][col1 + 4]; A_convert[n][j][wh + 5] = mask_3d[n][j][col1 + 5]; A_convert[n][j][wh + 6] = mask_3d[n][j][col1 + 6]; A_convert[n][j][wh + 7] = mask_3d[n][j][col2]; A_convert[n][j][wh + 8] = mask_3d[n][j][col2 + 1]; A_convert[n][j][wh + 9] = mask_3d[n][j][col2 + 2]; A_convert[n][j][wh + 10] = mask_3d[n][j][col2 + 3]; A_convert[n][j][wh + 11] = mask_3d[n][j][col2 + 4]; A_convert[n][j][wh + 12] = mask_3d[n][j][col2 + 5]; A_convert[n][j][wh + 13] = mask_3d[n][j][col2 + 6]; A_convert[n][j][wh + 14] = mask_3d[n][j][col3]; A_convert[n][j][wh + 15] = mask_3d[n][j][col3 + 1]; A_convert[n][j][wh + 16] = mask_3d[n][j][col3 + 2]; A_convert[n][j][wh + 17] = mask_3d[n][j][col3 + 3]; A_convert[n][j][wh + 18] = mask_3d[n][j][col3 + 4]; A_convert[n][j][wh + 19] = mask_3d[n][j][col3 + 5]; A_convert[n][j][wh + 20] = mask_3d[n][j][col3 + 6]; A_convert[n][j][wh + 21] = mask_3d[n][j][col4]; A_convert[n][j][wh + 22] = mask_3d[n][j][col4 + 1]; A_convert[n][j][wh + 23] = mask_3d[n][j][col4 + 2]; A_convert[n][j][wh + 24] = mask_3d[n][j][col4 + 3]; A_convert[n][j][wh + 25] = mask_3d[n][j][col4 + 4]; A_convert[n][j][wh + 26] = mask_3d[n][j][col4 + 5]; A_convert[n][j][wh + 27] = mask_3d[n][j][col4 + 6]; A_convert[n][j][wh + 28] = mask_3d[n][j][col5]; A_convert[n][j][wh + 29] = mask_3d[n][j][col5 + 1]; A_convert[n][j][wh + 30] = mask_3d[n][j][col5 + 2]; A_convert[n][j][wh + 31] = mask_3d[n][j][col5 + 3]; A_convert[n][j][wh + 32] = mask_3d[n][j][col5 + 4]; A_convert[n][j][wh + 33] = mask_3d[n][j][col5 + 5]; A_convert[n][j][wh + 34] = mask_3d[n][j][col5 + 6]; A_convert[n][j][wh + 35] = mask_3d[n][j][col6]; A_convert[n][j][wh + 36] = mask_3d[n][j][col6 + 1]; A_convert[n][j][wh + 37] = mask_3d[n][j][col6 + 2]; A_convert[n][j][wh + 38] = mask_3d[n][j][col6 + 3]; A_convert[n][j][wh + 39] = mask_3d[n][j][col6 + 4]; A_convert[n][j][wh + 40] = mask_3d[n][j][col6 + 5]; A_convert[n][j][wh + 41] = 
mask_3d[n][j][col6 + 6]; A_convert[n][j][wh + 42] = mask_3d[n][j][col7]; A_convert[n][j][wh + 43] = mask_3d[n][j][col7 + 1]; A_convert[n][j][wh + 44] = mask_3d[n][j][col7 + 2]; A_convert[n][j][wh + 45] = mask_3d[n][j][col7 + 3]; A_convert[n][j][wh + 46] = mask_3d[n][j][col7 + 4]; A_convert[n][j][wh + 47] = mask_3d[n][j][col7 + 5]; A_convert[n][j][wh + 48] = mask_3d[n][j][col7 + 6]; } } } } } std::vector<int> conv_cache;//存储卷积完了的数字 for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < outm; ++k) { for (int l = 0; l < outm; ++l) { int result_one_position = 0; index_t wh = k * outm * convAw + l * convAw; for (int m = 0; m < convAw; ++m) { result_one_position += A_convert[i][j][wh + m] * kernel_3d[i][j][m]; } conv_cache.push_back(result_one_position); } } } } //把卷积完了的数重组为4维数组 std::vector<std::vector<std::vector<std::vector<int>>>> mask_conved(num_batches, std::vector<std::vector<std::vector<int>>>( 1, std::vector<std::vector<int>>( outm, std::vector<int>( outm)))); index_t delta = blocksize / 2; index_t input_height = mask_height + delta * 2; index_t input_width = mask_width + delta * 2; index_t height_to_crop = outm - input_height; index_t width_to_crop = outm - input_width; if (height_to_crop != 0) { for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < outm - height_to_crop + 1; ++k) { for (int l = 0; l < outm; ++l) { mask_conved[i][j][k][l] = (conv_cache[i * outm * (outm - height_to_crop) + j * outm * (outm - height_to_crop) + k * outm + l]==0)? 1:0; } } } } } if (width_to_crop != 0) { for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < outm; ++k) { for (int l = 0; l < outm - width_to_crop + 1; ++l) { mask_conved[i][j][k][l] =( conv_cache[i * outm * (outm - width_to_crop) + j * outm * (outm - width_to_crop) + k * (outm - width_to_crop) + l]==0)? 1:0; } } } } } if ((width_to_crop != 0)&&(height_to_crop!=0)) { for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < outm-height_to_crop+1; ++k) { for (int l = 0; l < outm - width_to_crop + 1; ++l) { mask_conved[i][j][k][l] =( conv_cache[i * (outm-height_to_crop) * (outm - width_to_crop) + j * (outm-height_to_crop) * (outm - width_to_crop) + k * (outm - width_to_crop) + l]==0)? 1:0; } } } } } for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < 1; ++j) { for (int k = 0; k < outm; ++k) { for (int l = 0; l < outm; ++l) { mask_conved[i][j][k][l] =(conv_cache[i * outm * outm + j * outm * outm + k * outm + l]==0)? 
1:0; } } } } //把mask_conved变为一个1D的数组来与indata进行计算 int mask_conved_1d[count]; int *dev_mask; for (int i = 0; i < num_batches; ++i) { for (int j = 0; j < channels; ++j) { for (int k = 0; k < height; ++k) { for (int l = 0; l < width; ++l) { mask_conved_1d[i * channels * height * width + j * height * width + k * width + l] = mask_conved[i][0][k][l]; } } } } //allocate memory on GPU cudaMalloc((void**)&dev_mask,count* sizeof(int)); cudaMemcpy(dev_mask,mask_conved_1d,count* sizeof(int),cudaMemcpyHostToDevice); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_,DType,{ const DType *input_data=inputs[gpudropblock::kData].dptr<DType>(); DType *mask_out=outputs[gpudropblock::kMask].dptr<DType>(); DType *dropblock_out=outputs[gpudropblock::kOut].dptr<DType>(); DropblockForwardKernel<DType><<<ROI_GET_BLOCKS(count),kMaxThreadsPerBlock,0,stream>>>( count,input_data,dropblock_out,mask_out,dev_mask ); }) cudaFree (dev_mask); } else{ const TBlob& data = inputs[gpudropblock::kData]; if (req[gpudropblock::kOut] == kWriteTo) { mxnet_op::copy(s, out, data); } else { MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_,DType,{MXNET_ASSIGN_REQ_SWITCH(req[gpudropblock::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), data.dptr<DType>());//identity:input==output }); }) } } } } template <typename xpu> void DropblockBackwardCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs){ CHECK_EQ(inputs.size(),2U); CHECK_EQ(outputs.size(),1); CHECK_EQ(req.size(),1); using namespace mshadow; using namespace mshadow::expr; std::vector<TBlob> out_grads(2); std::vector<TBlob> out_data(2); out_grads[gpudropblock::kOut]=inputs[0]; out_data[gpudropblock::kMask]=inputs[1]; Stream<gpu> *s=ctx.get_stream<gpu>(); cudaStream_t stream = mshadow::Stream<gpu>::GetStream(s); const GPUDropblockParam param =nnvm::get<GPUDropblockParam>(attrs.parsed); if(ctx.is_train||param.mode==gpudropblock::kAlways) { const TBlob &gdata=outputs[gpudropblock::kData]; const TBlob &grad=out_grads[gpudropblock::kOut]; const TBlob &mask=out_data[gpudropblock::kMask]; const int count=inputs[gpudropblock::kData].Size(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_,DType,{ DropblockBackwardKernel<DType> <<<ROI_GET_BLOCKS(count), kMaxThreadsPerBlock, 0, stream>>>( count, gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>() ); }) }else{ const TBlob& gdata = outputs[gpudropblock::kData]; const TBlob& grad = out_grads[gpudropblock::kOut]; if (req[gpudropblock::kData] == kWriteTo) { mxnet_op::copy(s, gdata, grad); } else { MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_,DType,{MXNET_ASSIGN_REQ_SWITCH(req[gpudropblock::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>()); });}) } } } NNVM_REGISTER_OP(GPUDropblock) .set_attr<FCompute>("FCompute<gpu>", DropblockForwardCompute<gpu>); NNVM_REGISTER_OP(_backward_GPUDropblock) .set_attr<FCompute>("FCompute<gpu>", DropblockBackwardCompute<gpu>); } // namespace op } // namespace mxnet
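Aside from the hipify banner, the added #include "hip/hip_runtime.h", and the runtime API renames (hipMalloc, hipMemcpy, hipFree, hipStream_t, hipMemcpyHostToDevice for their cuda* counterparts), the paired sources above differ only in kernel-launch syntax; the forward launch, as it appears in each file:

// As written in the .cu file:
//   DropblockForwardKernel<DType><<<ROI_GET_BLOCKS(count), kMaxThreadsPerBlock, 0, stream>>>(
//       count, input_data, dropblock_out, mask_out, dev_mask);
//
// As rewritten by hipify in the .hip file:
//   hipLaunchKernelGGL((DropblockForwardKernel<DType>), dim3(ROI_GET_BLOCKS(count)),
//                      dim3(kMaxThreadsPerBlock), 0, stream,
//                      count, input_data, dropblock_out, mask_out, dev_mask);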
ae9c5d929007a9fb435d1e1c3703c5b5aa9dae0c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_bndp.h" #include "cuda_mparticles.cuh" #include "cuda_bits.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "b40c/radixsort_scanscatter_kernel4.h" using namespace b40c_thrust; typedef uint K; typedef uint V; static const int RADIX_BITS = 4; #include <cstdio> #include <cassert> #define THREADS_PER_BLOCK 256 // ---------------------------------------------------------------------- // k_reorder_send_by_id static void __global__ k_reorder_send_by_id(uint nr_prts_send, uint *d_xchg_ids, float4 *d_xi4, float4 *d_pxi4, float4 *d_xchg_xi4, float4 *d_xchg_pxi4) { int n = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (n >= nr_prts_send) { return; } uint id = d_xchg_ids[n]; d_xchg_xi4[n] = d_xi4[id]; d_xchg_pxi4[n] = d_pxi4[id]; } // ---------------------------------------------------------------------- // reorder_send_by_id // // copies particles to be sent to contiguous area following // the existing n_prts particles // // in: d_id[n_prts - n_prts_send:n_prts_send[ // in: d_xi4, d_pxi4[0:n_prts[ // out: d_xi4, d_pxi4[n_prts:n_prts_send[ template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::reorder_send_by_id(CudaMparticles* cmprts, uint n_prts_send) { cmprts->storage.xi4.resize(cmprts->n_prts + n_prts_send); cmprts->storage.pxi4.resize(cmprts->n_prts + n_prts_send); if (n_prts_send == 0) { return; } int dimGrid = (n_prts_send + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; hipLaunchKernelGGL(( k_reorder_send_by_id), dim3(dimGrid), dim3(THREADS_PER_BLOCK), 0, 0, n_prts_send, cmprts->by_block_.d_id.data().get() + cmprts->n_prts - n_prts_send, cmprts->storage.xi4.data().get(), cmprts->storage.pxi4.data().get(), cmprts->storage.xi4.data().get() + cmprts->n_prts, cmprts->storage.pxi4.data().get() + cmprts->n_prts); cuda_sync_if_enabled(); } // ---------------------------------------------------------------------- // reorder_send_by_id_gold template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::reorder_send_by_id_gold(CudaMparticles *cmprts, uint n_prts_send) { thrust::host_vector<uint> h_id(cmprts->by_block_.d_id.data(), cmprts->by_block_.d_id.data() + cmprts->n_prts); thrust::host_vector<float4> h_xi4(cmprts->storage.xi4.data(), cmprts->storage.xi4.data() + cmprts->n_prts + n_prts_send); thrust::host_vector<float4> h_pxi4(cmprts->storage.pxi4.data(), cmprts->storage.pxi4.data() + cmprts->n_prts + n_prts_send); for (int n = 0; n < n_prts_send; n++) { uint id = h_id[cmprts->n_prts - n_prts_send + n]; h_xi4[cmprts->n_prts + n] = h_xi4[id]; h_pxi4[cmprts->n_prts + n] = h_pxi4[id]; } thrust::copy(h_xi4.begin(), h_xi4.end(), cmprts->storage.xi4.data()); thrust::copy(h_pxi4.begin(), h_pxi4.end(), cmprts->storage.pxi4.data()); } // ---------------------------------------------------------------------- // k_reorder_send_buf_total __global__ static void k_reorder_send_buf_total(int nr_prts, int nr_total_blocks, uint *d_bidx, uint *d_sums, float4 *d_xi4, float4 *d_pxi4, float4 *d_xchg_xi4, float4 *d_xchg_pxi4) { int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (i >= nr_prts) return; if (d_bidx[i] == CUDA_BND_S_OOB) { int j = d_sums[i]; d_xchg_xi4[j] = d_xi4[i]; d_xchg_pxi4[j] = d_pxi4[i]; } } // ---------------------------------------------------------------------- // reorder_send_buf_total template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, 
DIM>::reorder_send_buf_total(CudaMparticles *cmprts, uint n_prts_send) { if (n_patches() == 0) { return; } cmprts->resize(cmprts->n_prts + n_prts_send); float4 *xchg_xi4 = cmprts->storage.xi4.data().get() + cmprts->n_prts; float4 *xchg_pxi4 = cmprts->storage.pxi4.data().get() + cmprts->n_prts; dim3 dimBlock(THREADS_PER_BLOCK, 1); dim3 dimGrid((cmprts->n_prts + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1); hipLaunchKernelGGL(( k_reorder_send_buf_total), dim3(dimGrid), dim3(dimBlock), 0, 0, cmprts->n_prts, cmprts->n_blocks, cmprts->by_block_.d_idx.data().get(), d_sums.data().get(), cmprts->storage.xi4.data().get(), cmprts->storage.pxi4.data().get(), xchg_xi4, xchg_pxi4); cuda_sync_if_enabled(); } // ---------------------------------------------------------------------- // scan_send_buf_total // // in: d_spine_cnts // out: d_spine_sums, d_id[n_prts - n_prts_send: n_prts[ // template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::scan_send_buf_total(CudaMparticles* cmprts, uint n_prts_send) { // OPT, we could do this from the beginning and adapt find_n_send() thrust::exclusive_scan(d_spine_cnts.data() + n_blocks * 10, d_spine_cnts.data() + n_blocks * 11 + 1, d_spine_sums.data() + n_blocks * 10, cmprts->n_prts - n_prts_send); // OPT, we could somehow not fill in ids for not oob at all // this should make sure at least those within bounds don't screw anything up thrust::fill(d_spine_sums.data(), d_spine_sums.data() + n_blocks * 10, 0); Int3 mx = b_mx(); if (mx[0] == 1 && mx[1] == 4 && mx[2] == 4) { hipLaunchKernelGGL(( ScanScatterDigits4<K, V, 0, RADIX_BITS, 0, NopFunctor<K>, NopFunctor<K>, 4, 4>) , dim3(n_blocks), dim3(B40C_RADIXSORT_THREADS), 0, 0, d_spine_sums.data().get(), cmprts->by_block_.d_idx.data().get(), cmprts->by_block_.d_id.data().get(), cmprts->by_block_.d_off.data().get(), n_blocks); } else if (mx[0] == 1 && mx[1] == 8 && mx[2] == 8) { hipLaunchKernelGGL(( ScanScatterDigits4<K, V, 0, RADIX_BITS, 0, NopFunctor<K>, NopFunctor<K>, 8, 8>) , dim3(n_blocks), dim3(B40C_RADIXSORT_THREADS), 0, 0, d_spine_sums.data().get(), cmprts->by_block_.d_idx.data().get(), cmprts->by_block_.d_id.data().get(), cmprts->by_block_.d_off.data().get(), n_blocks); } else if (mx[0] == 1 && mx[1] == 16 && mx[2] == 16) { hipLaunchKernelGGL(( ScanScatterDigits4<K, V, 0, RADIX_BITS, 0, NopFunctor<K>, NopFunctor<K>, 16, 16>) , dim3(n_blocks), dim3(B40C_RADIXSORT_THREADS), 0, 0, d_spine_sums.data().get(), cmprts->by_block_.d_idx.data().get(), cmprts->by_block_.d_id.data().get(), cmprts->by_block_.d_off.data().get(), n_blocks); } else if (mx[0] == 1 && mx[1] == 32 && mx[2] == 32) { hipLaunchKernelGGL(( ScanScatterDigits4<K, V, 0, RADIX_BITS, 0, NopFunctor<K>, NopFunctor<K>, 32, 32>) , dim3(n_blocks), dim3(B40C_RADIXSORT_THREADS), 0, 0, d_spine_sums.data().get(), cmprts->by_block_.d_idx.data().get(), cmprts->by_block_.d_id.data().get(), cmprts->by_block_.d_off.data().get(), n_blocks); } else if (mx[0] == 1 && mx[1] == 64 && mx[2] == 64) { hipLaunchKernelGGL(( ScanScatterDigits4<K, V, 0, RADIX_BITS, 0, NopFunctor<K>, NopFunctor<K>, 64, 64>) , dim3(n_blocks), dim3(B40C_RADIXSORT_THREADS), 0, 0, d_spine_sums.data().get(), cmprts->by_block_.d_idx.data().get(), cmprts->by_block_.d_id.data().get(), cmprts->by_block_.d_off.data().get(), n_blocks); } else if (mx[0] == 1 && mx[1] == 128 && mx[2] == 128) { hipLaunchKernelGGL(( ScanScatterDigits4<K, V, 0, RADIX_BITS, 0, NopFunctor<K>, NopFunctor<K>, 128, 128>) , dim3(n_blocks), dim3(B40C_RADIXSORT_THREADS), 0, 0, d_spine_sums.data().get(), 
cmprts->by_block_.d_idx.data().get(), cmprts->by_block_.d_id.data().get(), cmprts->by_block_.d_off.data().get(), n_blocks); } else { printf("no support for b_mx %d x %d x %d!\n", mx[0], mx[1], mx[2]); assert(0); } cuda_sync_if_enabled(); reorder_send_by_id(cmprts, n_prts_send); } // ---------------------------------------------------------------------- // scan_send_buf_total_gold template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::scan_send_buf_total_gold(CudaMparticles *cmprts, uint n_prts_send) { thrust::host_vector<uint> h_off(cmprts->by_block_.d_off); thrust::host_vector<uint> h_bidx(cmprts->by_block_.d_idx.data(), cmprts->by_block_.d_idx.data() + cmprts->n_prts); thrust::host_vector<uint> h_sums(cmprts->n_prts); for (uint bid = 0; bid < n_blocks; bid++) { uint sum = d_spine_sums[n_blocks * 10 + bid]; for (int n = h_off[bid]; n < h_off[bid+1]; n++) { if (h_bidx[n] == CUDA_BND_S_OOB) { h_sums[n] = sum; sum++; } } } d_sums.resize(cmprts->n_prts); thrust::copy(h_sums.begin(), h_sums.end(), d_sums.begin()); reorder_send_buf_total(cmprts, n_prts_send); } template struct cuda_bndp<cuda_mparticles<BS144>, dim_yz>;
ae9c5d929007a9fb435d1e1c3703c5b5aa9dae0c.cu
#include "cuda_bndp.h" #include "cuda_mparticles.cuh" #include "cuda_bits.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "b40c/radixsort_scanscatter_kernel4.h" using namespace b40c_thrust; typedef uint K; typedef uint V; static const int RADIX_BITS = 4; #include <cstdio> #include <cassert> #define THREADS_PER_BLOCK 256 // ---------------------------------------------------------------------- // k_reorder_send_by_id static void __global__ k_reorder_send_by_id(uint nr_prts_send, uint *d_xchg_ids, float4 *d_xi4, float4 *d_pxi4, float4 *d_xchg_xi4, float4 *d_xchg_pxi4) { int n = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (n >= nr_prts_send) { return; } uint id = d_xchg_ids[n]; d_xchg_xi4[n] = d_xi4[id]; d_xchg_pxi4[n] = d_pxi4[id]; } // ---------------------------------------------------------------------- // reorder_send_by_id // // copies particles to be sent to contiguous area following // the existing n_prts particles // // in: d_id[n_prts - n_prts_send:n_prts_send[ // in: d_xi4, d_pxi4[0:n_prts[ // out: d_xi4, d_pxi4[n_prts:n_prts_send[ template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::reorder_send_by_id(CudaMparticles* cmprts, uint n_prts_send) { cmprts->storage.xi4.resize(cmprts->n_prts + n_prts_send); cmprts->storage.pxi4.resize(cmprts->n_prts + n_prts_send); if (n_prts_send == 0) { return; } int dimGrid = (n_prts_send + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; k_reorder_send_by_id<<<dimGrid, THREADS_PER_BLOCK>>> (n_prts_send, cmprts->by_block_.d_id.data().get() + cmprts->n_prts - n_prts_send, cmprts->storage.xi4.data().get(), cmprts->storage.pxi4.data().get(), cmprts->storage.xi4.data().get() + cmprts->n_prts, cmprts->storage.pxi4.data().get() + cmprts->n_prts); cuda_sync_if_enabled(); } // ---------------------------------------------------------------------- // reorder_send_by_id_gold template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::reorder_send_by_id_gold(CudaMparticles *cmprts, uint n_prts_send) { thrust::host_vector<uint> h_id(cmprts->by_block_.d_id.data(), cmprts->by_block_.d_id.data() + cmprts->n_prts); thrust::host_vector<float4> h_xi4(cmprts->storage.xi4.data(), cmprts->storage.xi4.data() + cmprts->n_prts + n_prts_send); thrust::host_vector<float4> h_pxi4(cmprts->storage.pxi4.data(), cmprts->storage.pxi4.data() + cmprts->n_prts + n_prts_send); for (int n = 0; n < n_prts_send; n++) { uint id = h_id[cmprts->n_prts - n_prts_send + n]; h_xi4[cmprts->n_prts + n] = h_xi4[id]; h_pxi4[cmprts->n_prts + n] = h_pxi4[id]; } thrust::copy(h_xi4.begin(), h_xi4.end(), cmprts->storage.xi4.data()); thrust::copy(h_pxi4.begin(), h_pxi4.end(), cmprts->storage.pxi4.data()); } // ---------------------------------------------------------------------- // k_reorder_send_buf_total __global__ static void k_reorder_send_buf_total(int nr_prts, int nr_total_blocks, uint *d_bidx, uint *d_sums, float4 *d_xi4, float4 *d_pxi4, float4 *d_xchg_xi4, float4 *d_xchg_pxi4) { int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (i >= nr_prts) return; if (d_bidx[i] == CUDA_BND_S_OOB) { int j = d_sums[i]; d_xchg_xi4[j] = d_xi4[i]; d_xchg_pxi4[j] = d_pxi4[i]; } } // ---------------------------------------------------------------------- // reorder_send_buf_total template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::reorder_send_buf_total(CudaMparticles *cmprts, uint n_prts_send) { if (n_patches() == 0) { return; } cmprts->resize(cmprts->n_prts + 
n_prts_send); float4 *xchg_xi4 = cmprts->storage.xi4.data().get() + cmprts->n_prts; float4 *xchg_pxi4 = cmprts->storage.pxi4.data().get() + cmprts->n_prts; dim3 dimBlock(THREADS_PER_BLOCK, 1); dim3 dimGrid((cmprts->n_prts + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1); k_reorder_send_buf_total<<<dimGrid, dimBlock>>>(cmprts->n_prts, cmprts->n_blocks, cmprts->by_block_.d_idx.data().get(), d_sums.data().get(), cmprts->storage.xi4.data().get(), cmprts->storage.pxi4.data().get(), xchg_xi4, xchg_pxi4); cuda_sync_if_enabled(); } // ---------------------------------------------------------------------- // scan_send_buf_total // // in: d_spine_cnts // out: d_spine_sums, d_id[n_prts - n_prts_send: n_prts[ // template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::scan_send_buf_total(CudaMparticles* cmprts, uint n_prts_send) { // OPT, we could do this from the beginning and adapt find_n_send() thrust::exclusive_scan(d_spine_cnts.data() + n_blocks * 10, d_spine_cnts.data() + n_blocks * 11 + 1, d_spine_sums.data() + n_blocks * 10, cmprts->n_prts - n_prts_send); // OPT, we could somehow not fill in ids for not oob at all // this should make sure at least those within bounds don't screw anything up thrust::fill(d_spine_sums.data(), d_spine_sums.data() + n_blocks * 10, 0); Int3 mx = b_mx(); if (mx[0] == 1 && mx[1] == 4 && mx[2] == 4) { ScanScatterDigits4<K, V, 0, RADIX_BITS, 0, NopFunctor<K>, NopFunctor<K>, 4, 4> <<<n_blocks, B40C_RADIXSORT_THREADS>>> (d_spine_sums.data().get(), cmprts->by_block_.d_idx.data().get(), cmprts->by_block_.d_id.data().get(), cmprts->by_block_.d_off.data().get(), n_blocks); } else if (mx[0] == 1 && mx[1] == 8 && mx[2] == 8) { ScanScatterDigits4<K, V, 0, RADIX_BITS, 0, NopFunctor<K>, NopFunctor<K>, 8, 8> <<<n_blocks, B40C_RADIXSORT_THREADS>>> (d_spine_sums.data().get(), cmprts->by_block_.d_idx.data().get(), cmprts->by_block_.d_id.data().get(), cmprts->by_block_.d_off.data().get(), n_blocks); } else if (mx[0] == 1 && mx[1] == 16 && mx[2] == 16) { ScanScatterDigits4<K, V, 0, RADIX_BITS, 0, NopFunctor<K>, NopFunctor<K>, 16, 16> <<<n_blocks, B40C_RADIXSORT_THREADS>>> (d_spine_sums.data().get(), cmprts->by_block_.d_idx.data().get(), cmprts->by_block_.d_id.data().get(), cmprts->by_block_.d_off.data().get(), n_blocks); } else if (mx[0] == 1 && mx[1] == 32 && mx[2] == 32) { ScanScatterDigits4<K, V, 0, RADIX_BITS, 0, NopFunctor<K>, NopFunctor<K>, 32, 32> <<<n_blocks, B40C_RADIXSORT_THREADS>>> (d_spine_sums.data().get(), cmprts->by_block_.d_idx.data().get(), cmprts->by_block_.d_id.data().get(), cmprts->by_block_.d_off.data().get(), n_blocks); } else if (mx[0] == 1 && mx[1] == 64 && mx[2] == 64) { ScanScatterDigits4<K, V, 0, RADIX_BITS, 0, NopFunctor<K>, NopFunctor<K>, 64, 64> <<<n_blocks, B40C_RADIXSORT_THREADS>>> (d_spine_sums.data().get(), cmprts->by_block_.d_idx.data().get(), cmprts->by_block_.d_id.data().get(), cmprts->by_block_.d_off.data().get(), n_blocks); } else if (mx[0] == 1 && mx[1] == 128 && mx[2] == 128) { ScanScatterDigits4<K, V, 0, RADIX_BITS, 0, NopFunctor<K>, NopFunctor<K>, 128, 128> <<<n_blocks, B40C_RADIXSORT_THREADS>>> (d_spine_sums.data().get(), cmprts->by_block_.d_idx.data().get(), cmprts->by_block_.d_id.data().get(), cmprts->by_block_.d_off.data().get(), n_blocks); } else { printf("no support for b_mx %d x %d x %d!\n", mx[0], mx[1], mx[2]); assert(0); } cuda_sync_if_enabled(); reorder_send_by_id(cmprts, n_prts_send); } // ---------------------------------------------------------------------- // scan_send_buf_total_gold template<typename 
CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::scan_send_buf_total_gold(CudaMparticles *cmprts, uint n_prts_send) { thrust::host_vector<uint> h_off(cmprts->by_block_.d_off); thrust::host_vector<uint> h_bidx(cmprts->by_block_.d_idx.data(), cmprts->by_block_.d_idx.data() + cmprts->n_prts); thrust::host_vector<uint> h_sums(cmprts->n_prts); for (uint bid = 0; bid < n_blocks; bid++) { uint sum = d_spine_sums[n_blocks * 10 + bid]; for (int n = h_off[bid]; n < h_off[bid+1]; n++) { if (h_bidx[n] == CUDA_BND_S_OOB) { h_sums[n] = sum; sum++; } } } d_sums.resize(cmprts->n_prts); thrust::copy(h_sums.begin(), h_sums.end(), d_sums.begin()); reorder_send_buf_total(cmprts, n_prts_send); } template struct cuda_bndp<cuda_mparticles<BS144>, dim_yz>;
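scan_send_buf_total above converts per-block counts of out-of-bound particles into contiguous write offsets with an exclusive scan whose initial value is n_prts - n_prts_send, so the scattered ids of leaving particles occupy the last n_prts_send entries of d_id before reorder_send_by_id copies them behind the resident particles. A self-contained sketch of that pattern with made-up sizes (the function name and the numbers are illustrative only):

#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <vector>

void scan_send_offsets_sketch() {
  // Four "blocks", each with a count of out-of-bound particles.
  std::vector<unsigned int> h_counts = {2, 0, 3, 1};
  thrust::device_vector<unsigned int> counts(h_counts.begin(), h_counts.end());
  thrust::device_vector<unsigned int> offsets(counts.size());
  const unsigned int first_slot = 10;  // plays the role of n_prts - n_prts_send
  thrust::exclusive_scan(counts.begin(), counts.end(), offsets.begin(), first_slot);
  // offsets now holds {10, 12, 12, 15}: where each block's leaving particles start
  // in the send region appended behind the resident particles.
}

The preceding fill of the first n_blocks * 10 spine entries with zeros serves the same purpose as the comment in the file states: particles that stay in bounds must not be scattered by the subsequent ScanScatterDigits4 pass.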
377497a9dfc3f85f50bc04afe69a12fe7bc8098d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dali/operators/image/paste/paste.h" #include <utility> #include <vector> #include <algorithm> namespace dali { #define PASTE_BLOCKSIZE 512 namespace { __global__ __launch_bounds__(PASTE_BLOCKSIZE, 1) void BatchedPaste( const int N, const int C, const uint8* const __restrict__ fill_value, const uint8* const * const __restrict__ in_batch, uint8* const* const __restrict__ out_batch, const int* const __restrict__ in_out_dims_paste_yx) { const int n = blockIdx.x; constexpr int blockSize = PASTE_BLOCKSIZE; constexpr int nThreadsPerWave = 32; // 1 warp per row constexpr int nWaves = blockSize / nThreadsPerWave; constexpr int MAX_C = 1024; __shared__ uint8 rgb[MAX_C]; __shared__ int jump[MAX_C]; for (int i = threadIdx.x; i < C; i += blockDim.x) { rgb[i] = fill_value[i % C]; jump[i] = (i + nThreadsPerWave) % C; } const int offset = n*6; const int in_H = in_out_dims_paste_yx[offset]; const int in_W = in_out_dims_paste_yx[offset + 1]; const int out_H = in_out_dims_paste_yx[offset + 2]; const int out_W = in_out_dims_paste_yx[offset + 3]; const int paste_y = in_out_dims_paste_yx[offset + 4]; const int paste_x = in_out_dims_paste_yx[offset + 5]; const uint8* const input_ptr = in_batch[n]; uint8 * const output_ptr = out_batch[n]; __syncthreads(); const int myWave = threadIdx.x / nThreadsPerWave; const int myId = threadIdx.x % nThreadsPerWave; const int paste_x_stride = paste_x * C; const int in_stride = in_W * C; const int startC = myId % C; for (int h = myWave; h < out_H; h += nWaves) { const int H = h * out_W * C; const int in_h = h - paste_y; const bool h_in_range = in_h >= 0 && in_h < in_H; if (h_in_range) { int c = startC; for (int i = myId; i < paste_x * C; i += nThreadsPerWave) { const int out_idx = H + i; output_ptr[out_idx] = rgb[c]; c = jump[c]; } const int current_in_stride = in_h*in_stride - paste_x_stride; for (int i = myId + paste_x_stride; i < paste_x_stride + in_W * C; i += nThreadsPerWave) { const int out_idx = H + i; const int in_idx = current_in_stride + i; output_ptr[out_idx] = input_ptr[in_idx]; } c = startC; for (int i = myId + (paste_x + in_W) * C; i < out_W * C; i += nThreadsPerWave) { const int out_idx = H + i; output_ptr[out_idx] = rgb[c]; c = jump[c]; } } else { int c = startC; for (int i = myId; i < out_W * C; i += nThreadsPerWave) { const int out_idx = H + i; output_ptr[out_idx] = rgb[c]; c = jump[c]; } } } } } // namespace template<> void Paste<GPUBackend>::RunHelper(DeviceWorkspace &ws) { auto curr_batch_size = ws.GetInputBatchSize(0); fill_value_.set_order(ws.stream()); hipLaunchKernelGGL(( BatchedPaste), dim3(curr_batch_size), dim3(PASTE_BLOCKSIZE), 0, ws.stream(), curr_batch_size, C_, fill_value_.template data<uint8>(), input_ptrs_gpu_.template data<const uint8*>(), output_ptrs_gpu_.template data<uint8*>(), in_out_dims_paste_yx_gpu_.template 
data<int>()); } template<> void Paste<GPUBackend>::SetupSharedSampleParams(DeviceWorkspace &ws) { // No setup shared between input sets } template<> void Paste<GPUBackend>::SetupSampleParams(DeviceWorkspace &ws) { auto &input = ws.Input<GPUBackend>(0); auto &output = ws.Output<GPUBackend>(0); auto curr_batch_size = ws.GetInputBatchSize(0); std::vector<TensorShape<>> output_shape(curr_batch_size); for (int i = 0; i < curr_batch_size; ++i) { auto input_shape = input.tensor_shape(i); DALI_ENFORCE(input_shape.size() == 3, "Expects 3-dimensional image input."); int H = input_shape[0]; int W = input_shape[1]; C_ = input_shape[2]; float ratio = spec_.GetArgument<float>("ratio", &ws, i); DALI_ENFORCE(ratio >= 1., "ratio of less than 1 is not supported"); int new_H = static_cast<int>(ratio * H); int new_W = static_cast<int>(ratio * W); int min_canvas_size_ = spec_.GetArgument<float>("min_canvas_size", &ws, i); DALI_ENFORCE(min_canvas_size_ >= 0., "min_canvas_size_ of less than 0 is not supported"); new_H = ::max(new_H, static_cast<int>(min_canvas_size_)); new_W = ::max(new_W, static_cast<int>(min_canvas_size_)); output_shape[i] = {new_H, new_W, C_}; float paste_x_ = spec_.GetArgument<float>("paste_x", &ws, i); float paste_y_ = spec_.GetArgument<float>("paste_y", &ws, i); DALI_ENFORCE(paste_x_ >= 0, "paste_x of less than 0 is not supported"); DALI_ENFORCE(paste_x_ <= 1, "paste_x_ of more than 1 is not supported"); DALI_ENFORCE(paste_y_ >= 0, "paste_y_ of less than 0 is not supported"); DALI_ENFORCE(paste_y_ <= 1, "paste_y_ of more than 1 is not supported"); int paste_x = paste_x_ * (new_W - W); int paste_y = paste_y_ * (new_H - H); int sample_dims_paste_yx[] = {H, W, new_H, new_W, paste_y, paste_x}; int *sample_data = in_out_dims_paste_yx_.template mutable_data<int>() + (i*NUM_INDICES); std::copy(sample_dims_paste_yx, sample_dims_paste_yx + NUM_INDICES, sample_data); } output.Resize(output_shape, input.type()); output.SetLayout("HWC"); for (int i = 0; i < curr_batch_size; ++i) { input_ptrs_.template mutable_data<const uint8*>()[i] = input.template tensor<uint8>(i); output_ptrs_.template mutable_data<uint8*>()[i] = output.template mutable_tensor<uint8>(i); } // Copy pointers on the GPU for fast access input_ptrs_gpu_.Copy(input_ptrs_, ws.stream()); output_ptrs_gpu_.Copy(output_ptrs_, ws.stream()); in_out_dims_paste_yx_gpu_.Copy(in_out_dims_paste_yx_, ws.stream()); } template<> void Paste<GPUBackend>::RunImpl(DeviceWorkspace &ws) { SetupSampleParams(ws); RunHelper(ws); } DALI_REGISTER_OPERATOR(Paste, Paste<GPUBackend>, GPU); } // namespace dali
377497a9dfc3f85f50bc04afe69a12fe7bc8098d.cu
// Copyright (c) 2017-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dali/operators/image/paste/paste.h" #include <utility> #include <vector> #include <algorithm> namespace dali { #define PASTE_BLOCKSIZE 512 namespace { __global__ __launch_bounds__(PASTE_BLOCKSIZE, 1) void BatchedPaste( const int N, const int C, const uint8* const __restrict__ fill_value, const uint8* const * const __restrict__ in_batch, uint8* const* const __restrict__ out_batch, const int* const __restrict__ in_out_dims_paste_yx) { const int n = blockIdx.x; constexpr int blockSize = PASTE_BLOCKSIZE; constexpr int nThreadsPerWave = 32; // 1 warp per row constexpr int nWaves = blockSize / nThreadsPerWave; constexpr int MAX_C = 1024; __shared__ uint8 rgb[MAX_C]; __shared__ int jump[MAX_C]; for (int i = threadIdx.x; i < C; i += blockDim.x) { rgb[i] = fill_value[i % C]; jump[i] = (i + nThreadsPerWave) % C; } const int offset = n*6; const int in_H = in_out_dims_paste_yx[offset]; const int in_W = in_out_dims_paste_yx[offset + 1]; const int out_H = in_out_dims_paste_yx[offset + 2]; const int out_W = in_out_dims_paste_yx[offset + 3]; const int paste_y = in_out_dims_paste_yx[offset + 4]; const int paste_x = in_out_dims_paste_yx[offset + 5]; const uint8* const input_ptr = in_batch[n]; uint8 * const output_ptr = out_batch[n]; __syncthreads(); const int myWave = threadIdx.x / nThreadsPerWave; const int myId = threadIdx.x % nThreadsPerWave; const int paste_x_stride = paste_x * C; const int in_stride = in_W * C; const int startC = myId % C; for (int h = myWave; h < out_H; h += nWaves) { const int H = h * out_W * C; const int in_h = h - paste_y; const bool h_in_range = in_h >= 0 && in_h < in_H; if (h_in_range) { int c = startC; for (int i = myId; i < paste_x * C; i += nThreadsPerWave) { const int out_idx = H + i; output_ptr[out_idx] = rgb[c]; c = jump[c]; } const int current_in_stride = in_h*in_stride - paste_x_stride; for (int i = myId + paste_x_stride; i < paste_x_stride + in_W * C; i += nThreadsPerWave) { const int out_idx = H + i; const int in_idx = current_in_stride + i; output_ptr[out_idx] = input_ptr[in_idx]; } c = startC; for (int i = myId + (paste_x + in_W) * C; i < out_W * C; i += nThreadsPerWave) { const int out_idx = H + i; output_ptr[out_idx] = rgb[c]; c = jump[c]; } } else { int c = startC; for (int i = myId; i < out_W * C; i += nThreadsPerWave) { const int out_idx = H + i; output_ptr[out_idx] = rgb[c]; c = jump[c]; } } } } } // namespace template<> void Paste<GPUBackend>::RunHelper(DeviceWorkspace &ws) { auto curr_batch_size = ws.GetInputBatchSize(0); fill_value_.set_order(ws.stream()); BatchedPaste<<<curr_batch_size, PASTE_BLOCKSIZE, 0, ws.stream()>>>( curr_batch_size, C_, fill_value_.template data<uint8>(), input_ptrs_gpu_.template data<const uint8*>(), output_ptrs_gpu_.template data<uint8*>(), in_out_dims_paste_yx_gpu_.template data<int>()); } template<> void Paste<GPUBackend>::SetupSharedSampleParams(DeviceWorkspace &ws) { // No setup shared between 
input sets } template<> void Paste<GPUBackend>::SetupSampleParams(DeviceWorkspace &ws) { auto &input = ws.Input<GPUBackend>(0); auto &output = ws.Output<GPUBackend>(0); auto curr_batch_size = ws.GetInputBatchSize(0); std::vector<TensorShape<>> output_shape(curr_batch_size); for (int i = 0; i < curr_batch_size; ++i) { auto input_shape = input.tensor_shape(i); DALI_ENFORCE(input_shape.size() == 3, "Expects 3-dimensional image input."); int H = input_shape[0]; int W = input_shape[1]; C_ = input_shape[2]; float ratio = spec_.GetArgument<float>("ratio", &ws, i); DALI_ENFORCE(ratio >= 1., "ratio of less than 1 is not supported"); int new_H = static_cast<int>(ratio * H); int new_W = static_cast<int>(ratio * W); int min_canvas_size_ = spec_.GetArgument<float>("min_canvas_size", &ws, i); DALI_ENFORCE(min_canvas_size_ >= 0., "min_canvas_size_ of less than 0 is not supported"); new_H = std::max(new_H, static_cast<int>(min_canvas_size_)); new_W = std::max(new_W, static_cast<int>(min_canvas_size_)); output_shape[i] = {new_H, new_W, C_}; float paste_x_ = spec_.GetArgument<float>("paste_x", &ws, i); float paste_y_ = spec_.GetArgument<float>("paste_y", &ws, i); DALI_ENFORCE(paste_x_ >= 0, "paste_x of less than 0 is not supported"); DALI_ENFORCE(paste_x_ <= 1, "paste_x_ of more than 1 is not supported"); DALI_ENFORCE(paste_y_ >= 0, "paste_y_ of less than 0 is not supported"); DALI_ENFORCE(paste_y_ <= 1, "paste_y_ of more than 1 is not supported"); int paste_x = paste_x_ * (new_W - W); int paste_y = paste_y_ * (new_H - H); int sample_dims_paste_yx[] = {H, W, new_H, new_W, paste_y, paste_x}; int *sample_data = in_out_dims_paste_yx_.template mutable_data<int>() + (i*NUM_INDICES); std::copy(sample_dims_paste_yx, sample_dims_paste_yx + NUM_INDICES, sample_data); } output.Resize(output_shape, input.type()); output.SetLayout("HWC"); for (int i = 0; i < curr_batch_size; ++i) { input_ptrs_.template mutable_data<const uint8*>()[i] = input.template tensor<uint8>(i); output_ptrs_.template mutable_data<uint8*>()[i] = output.template mutable_tensor<uint8>(i); } // Copy pointers on the GPU for fast access input_ptrs_gpu_.Copy(input_ptrs_, ws.stream()); output_ptrs_gpu_.Copy(output_ptrs_, ws.stream()); in_out_dims_paste_yx_gpu_.Copy(in_out_dims_paste_yx_, ws.stream()); } template<> void Paste<GPUBackend>::RunImpl(DeviceWorkspace &ws) { SetupSampleParams(ws); RunHelper(ws); } DALI_REGISTER_OPERATOR(Paste, Paste<GPUBackend>, GPU); } // namespace dali
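SetupSampleParams above derives the output canvas size and the paste offsets from a few scalar arguments; the same arithmetic, pulled out into a hypothetical host-side helper (PasteGeometry and paste_geometry are illustrative names, not DALI API):

#include <algorithm>

// Illustrative helper mirroring the per-sample arithmetic above: grow the canvas by
// `ratio`, enforce a minimum canvas size, then place the H x W input at a relative
// position inside the free space.
struct PasteGeometry { int new_H, new_W, paste_y, paste_x; };

PasteGeometry paste_geometry(int H, int W, float ratio, int min_canvas_size,
                             float paste_y_rel, float paste_x_rel) {
  PasteGeometry g;
  g.new_H = std::max(static_cast<int>(ratio * H), min_canvas_size);
  g.new_W = std::max(static_cast<int>(ratio * W), min_canvas_size);
  g.paste_y = static_cast<int>(paste_y_rel * (g.new_H - H));  // 0 = top edge, 1 = bottom edge
  g.paste_x = static_cast<int>(paste_x_rel * (g.new_W - W));  // 0 = left edge, 1 = right edge
  return g;
}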
63b39674aa1d57eb3727c261c12d1b76bb65d9e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @author Azzam Haidar @author Ahmad Ahmad @generated from magmablas/zpotf2_kernels.cu, normal z -> d, Mon Jun 25 18:24:15 2018 */ #include "magma_internal.h" #include "batched_kernel_param.h" #define PRECISION_d #if defined(VERSION31) #define ENABLE_COND1 #define ENABLE_COND2 #define ENABLE_COND4 #define ENABLE_COND5 #define ENABLE_COND6 #endif #define MAX_NTCOL 8 #if defined(PRECISION_s) #define NTCOL2 (4) #define NTCOL1 (8) #elif defined(PRECISION_d) #define NTCOL2 (2) #define NTCOL1 (4) #else #define NTCOL2 (1) #define NTCOL1 (1) #endif #include "dpotf2_devicesfunc.cuh" #define A(i_, j_) (dA + (i_) + (j_)*ldda) /******************************************************************************/ __global__ void dpotf2_smlpin_fixwidth_kernel(int m, double *dA, int ldda, int localstep, int gbstep, magma_int_t *dinfo) { #pragma unroll for(int i = 0; i < m; i+= POTF2_NB){ //if(threadIdx.x < m-i){ dpotf2_smlpout_fixwidth_device(m-i, A(localstep+i, 0), A(localstep+i, localstep+i), ldda, localstep+i, gbstep, dinfo); //} } } /******************************************************************************/ __global__ void dpotf2_smlpin_anywidth_kernel(int m, double *dA, int ldda, int localstep, int gbstep, magma_int_t *dinfo) { #pragma unroll for(int i = 0; i < m; i+= POTF2_NB){ int ib = min(m-i, POTF2_NB); //if(threadIdx.x < m-i){ dpotf2_smlpout_anywidth_device(m-i, ib, A(localstep+i, 0), A(localstep+i, localstep+i), ldda, localstep+i, gbstep, dinfo); //} } } /******************************************************************************/ __global__ void dpotf2_smlpin_fixwidth_kernel_batched(int m, double **dA_array, int ai, int aj, int lda, int localstep, int gbstep, magma_int_t *info_array, const int batchCount) { const int batchid = blockIdx.z * blockDim.y + threadIdx.y; double *dA = dA_array[batchid] + aj * lda + ai; if (batchid >= batchCount) return; #pragma unroll for(int i = 0; i < m; i+= POTF2_NB){ //if(threadIdx.x < m-i){ dpotf2_smlpout_fixwidth_device(m-i, dA+localstep+i, dA+localstep+i+(localstep+i)*lda, lda, localstep+i, gbstep, &(info_array[batchid])); //} } } /******************************************************************************/ __global__ void dpotf2_smlpin_anywidth_kernel_batched(int m, double **dA_array, int ai, int aj, int lda, int localstep, int gbstep, magma_int_t *info_array, const int batchCount) { const int batchid = blockIdx.z * blockDim.y + threadIdx.y; double *dA = dA_array[batchid] + aj * lda + ai; if (batchid >= batchCount) return; #pragma unroll for(int i = 0; i < m; i+= POTF2_NB){ int ib = min(m-i, POTF2_NB); //if(threadIdx.x < m-i){ dpotf2_smlpout_anywidth_device(m-i, ib, dA+localstep+i, dA+localstep+i+(localstep+i)*lda, lda, localstep+i, gbstep, &(info_array[batchid])); //} } } /******************************************************************************/ __global__ void dpotf2_smlpout_fixwidth_kernel(int m, double *dA, int lda, int localstep, int gbstep, magma_int_t *dinfo) { dpotf2_smlpout_fixwidth_device(m, dA+localstep, dA+localstep+localstep*lda, lda, localstep, gbstep, dinfo ); } /******************************************************************************/ __global__ void dpotf2_smlpout_anywidth_kernel(int m, int n, double *dA, int lda, int localstep, int gbstep, magma_int_t *dinfo) { dpotf2_smlpout_anywidth_device(m, n, 
dA+localstep, dA+localstep+localstep*lda, lda, localstep, gbstep, dinfo ); } /******************************************************************************/ __global__ void dpotf2_smlpout_fixwidth_kernel_batched(int m, double **dA_array, int ai, int aj, int lda, int localstep, int gbstep, magma_int_t *info_array, const int batchCount) { const int batchid = blockIdx.z * blockDim.y + threadIdx.y; double *dA = dA_array[batchid] + aj * lda + ai; if (batchid >= batchCount) return; dpotf2_smlpout_fixwidth_device(m, dA+localstep, dA+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid])); } /******************************************************************************/ __global__ void dpotf2_smlpout_anywidth_kernel_batched(int m, int n, double **dA_array, int ai, int aj, int lda, int localstep, int gbstep, magma_int_t *info_array, const int batchCount) { const int batchid = blockIdx.z * blockDim.y + threadIdx.y; double *dA = dA_array[batchid] + aj * lda + ai; if (batchid >= batchCount) return; dpotf2_smlpout_anywidth_device(m, n, dA+localstep, dA+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid])); } /******************************************************************************/ extern "C" magma_int_t magma_dpotrf_lpout_batched( magma_uplo_t uplo, magma_int_t n, double **dA_array, magma_int_t ai, magma_int_t aj, magma_int_t lda, magma_int_t gbstep, magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t m = n; magma_int_t arginfo = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { arginfo = -1; } else if (m < 0 || n < 0 ) { arginfo = -2; } else if (lda < max(1,m)) { arginfo = -4; } else if (m < n) { arginfo = -10; } if (uplo == MagmaUpper) { fprintf( stderr, "%s: uplo=upper is not yet implemented\n", __func__ ); arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } // Quick return if possible if (m == 0 || n == 0) { return arginfo; } magma_int_t roundup_m = m; // rounding up need more investigation since it coul dmodify the matrix out of its bound //magma_int_t m8 = magma_roundup( m, 8 ); //magma_int_t roundup_m = m8 > lda ? m : m8; //magma_int_t m32 = magma_roundup( m, 32 ); //magma_int_t roundup_m = m32 > lda ? 
m : m32; magma_int_t ib, rows; for (magma_int_t j = 0; j < n; j += POTF2_NB) { ib = min(POTF2_NB, n-j); rows = roundup_m-j; // tuning ntcol magma_int_t ntcol; // for z precision, the best tuning is at NTCOL = 1 for all sizes if (rows > 64) ntcol = 1; else if (rows > 32) ntcol = NTCOL2; else ntcol = NTCOL1; // end of tuning ntcol const magma_int_t nTB = magma_ceildiv( batchCount, ntcol ); dim3 dimGrid(1, 1, nTB); magma_int_t nbth = rows; magma_int_t shared_mem_size = ntcol * (sizeof(double)*(nbth+POTF2_NB)*POTF2_NB); dim3 threads(nbth, ntcol); if (shared_mem_size > 47000) { arginfo = -33; magma_xerbla( __func__, -(arginfo) ); return arginfo; } if (ib == POTF2_NB) { hipLaunchKernelGGL(( dpotf2_smlpout_fixwidth_kernel_batched) , dim3(dimGrid), dim3(threads), shared_mem_size, queue->cuda_stream() , rows, dA_array, ai, aj, lda, j, gbstep, info_array, batchCount); } else { hipLaunchKernelGGL(( dpotf2_smlpout_anywidth_kernel_batched) , dim3(dimGrid), dim3(threads), shared_mem_size, queue->cuda_stream() , rows, ib, dA_array, ai, aj, lda, j, gbstep, info_array, batchCount); } } return arginfo; } /******************************************************************************/ extern "C" magma_int_t magma_dpotrf_lpin_batched( magma_uplo_t uplo, magma_int_t n, double **dA_array, magma_int_t ai, magma_int_t aj, magma_int_t lda, magma_int_t gbstep, magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t m = n; magma_int_t arginfo = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { arginfo = -1; } else if (m < 0 || n < 0 ) { arginfo = -2; } else if (lda < max(1,m)) { arginfo = -4; } else if (m < n) { arginfo = -10; } if (uplo == MagmaUpper) { fprintf( stderr, "%s: uplo=upper is not yet implemented\n", __func__ ); arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } // Quick return if possible if (m == 0 || n == 0) { return arginfo; } dim3 grid(1, 1, batchCount); dim3 threads(n, 1, 1); magma_int_t shared_mem_size = sizeof(double) * (n+POTF2_NB)*POTF2_NB; if (shared_mem_size > 47000) { arginfo = -33; magma_xerbla( __func__, -(arginfo) ); return arginfo; } if( n % POTF2_NB == 0){ hipLaunchKernelGGL(( dpotf2_smlpin_fixwidth_kernel_batched) , dim3(grid), dim3(threads), shared_mem_size, queue->cuda_stream() , n, dA_array, ai, aj, lda, 0, gbstep, info_array, batchCount); } else{ hipLaunchKernelGGL(( dpotf2_smlpin_anywidth_kernel_batched) , dim3(grid), dim3(threads), shared_mem_size, queue->cuda_stream() , n, dA_array, ai, aj, lda, 0, gbstep, info_array, batchCount); } return arginfo; } /******************************************************************************/ extern "C" magma_int_t magma_dpotf2_lpout( magma_uplo_t uplo, magma_int_t n, double *dA, magma_int_t lda, magma_int_t gbstep, magma_int_t *dinfo, magma_queue_t queue) { magma_int_t m = n; magma_int_t arginfo = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { arginfo = -1; } else if (m < 0 || n < 0 ) { arginfo = -2; } else if (lda < max(1,m)) { arginfo = -4; } else if (m < n) { arginfo = -10; } if (uplo == MagmaUpper) { fprintf( stderr, "%s: uplo=upper is not yet implemented\n", __func__ ); arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } // Quick return if possible if (m == 0 || n == 0) { return arginfo; } magma_int_t roundup_m = m; // rounding up need more investigation since it coul dmodify the matrix out of its bound //magma_int_t m8 = magma_roundup( m, 8 ); //magma_int_t roundup_m = m8 > lda ? 
m : m8; //magma_int_t m32 = magma_roundup( m, 32 ); //magma_int_t roundup_m = m32 > lda ? m : m32; magma_int_t ib, rows; for (magma_int_t j = 0; j < n; j += POTF2_NB) { ib = min(POTF2_NB, n-j); rows = roundup_m-j; dim3 dimGrid(1, 1, 1); magma_int_t nbth = rows; magma_int_t shared_mem_size = sizeof(double)*(nbth+POTF2_NB)*POTF2_NB; dim3 threads(nbth, 1, 1); if (shared_mem_size > 47000) { arginfo = -33; magma_xerbla( __func__, -(arginfo) ); return arginfo; } if (ib == POTF2_NB) { hipLaunchKernelGGL(( dpotf2_smlpout_fixwidth_kernel) , dim3(dimGrid), dim3(threads), shared_mem_size, queue->cuda_stream() , rows, dA, lda, j, gbstep, dinfo ); } else { hipLaunchKernelGGL(( dpotf2_smlpout_anywidth_kernel) , dim3(dimGrid), dim3(threads), shared_mem_size, queue->cuda_stream() , rows, ib, dA, lda, j, gbstep, dinfo ); } } return arginfo; } /******************************************************************************/ extern "C" magma_int_t magma_dpotf2_lpin( magma_uplo_t uplo, magma_int_t n, double *dA, magma_int_t ldda, magma_int_t gbstep, magma_int_t *dinfo, magma_queue_t queue) { magma_int_t arginfo = 0; // Quick return if possible if ( n == 0 ) { return arginfo; } dim3 grid(1, 1, 1); dim3 threads(n, 1, 1); magma_int_t shared_mem_size = sizeof(double) * (n+POTF2_NB)*POTF2_NB; if (shared_mem_size > 47000) { arginfo = -33; magma_xerbla( __func__, -(arginfo) ); return arginfo; } if( n % POTF2_NB == 0){ hipLaunchKernelGGL(( dpotf2_smlpin_fixwidth_kernel) , dim3(grid), dim3(threads), shared_mem_size, queue->cuda_stream() , n, dA, ldda, 0, gbstep, dinfo); } else{ hipLaunchKernelGGL(( dpotf2_smlpin_anywidth_kernel) , dim3(grid), dim3(threads), shared_mem_size, queue->cuda_stream() , n, dA, ldda, 0, gbstep, dinfo); } return arginfo; }
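The batched routine above (magma_dpotrf_lpout_batched) picks its launch shape per panel from the row count: the number of factorizations packed into one thread block (ntcol), the grid depth, and the dynamic shared-memory panel, rejecting configurations past the 47000-byte budget with arginfo = -33. A stand-alone sketch of that selection (Potf2Launch, nb, ntcol2 and ntcol1 are placeholder names for POTF2_NB, NTCOL2 and NTCOL1, whose values come from the headers and macros above):

#include <cstddef>

// Illustrative helper collecting the per-panel launch configuration computed in
// magma_dpotrf_lpout_batched above.
struct Potf2Launch { int ntcol; int grid_z; size_t shmem; bool fits; };

Potf2Launch potf2_launch_config(int rows, int nb, int batchCount, int ntcol2, int ntcol1) {
  Potf2Launch cfg;
  cfg.ntcol  = (rows > 64) ? 1 : (rows > 32 ? ntcol2 : ntcol1);  // tuning from the file
  cfg.grid_z = (batchCount + cfg.ntcol - 1) / cfg.ntcol;         // magma_ceildiv(batchCount, ntcol)
  cfg.shmem  = cfg.ntcol * (sizeof(double) * (rows + nb) * nb);  // one (rows+nb) x nb double panel per column
  cfg.fits   = cfg.shmem <= 47000;                               // otherwise the caller returns arginfo = -33
  return cfg;
}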
63b39674aa1d57eb3727c261c12d1b76bb65d9e2.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @author Azzam Haidar @author Ahmad Ahmad @generated from magmablas/zpotf2_kernels.cu, normal z -> d, Mon Jun 25 18:24:15 2018 */ #include "magma_internal.h" #include "batched_kernel_param.h" #define PRECISION_d #if defined(VERSION31) #define ENABLE_COND1 #define ENABLE_COND2 #define ENABLE_COND4 #define ENABLE_COND5 #define ENABLE_COND6 #endif #define MAX_NTCOL 8 #if defined(PRECISION_s) #define NTCOL2 (4) #define NTCOL1 (8) #elif defined(PRECISION_d) #define NTCOL2 (2) #define NTCOL1 (4) #else #define NTCOL2 (1) #define NTCOL1 (1) #endif #include "dpotf2_devicesfunc.cuh" #define A(i_, j_) (dA + (i_) + (j_)*ldda) /******************************************************************************/ __global__ void dpotf2_smlpin_fixwidth_kernel(int m, double *dA, int ldda, int localstep, int gbstep, magma_int_t *dinfo) { #pragma unroll for(int i = 0; i < m; i+= POTF2_NB){ //if(threadIdx.x < m-i){ dpotf2_smlpout_fixwidth_device(m-i, A(localstep+i, 0), A(localstep+i, localstep+i), ldda, localstep+i, gbstep, dinfo); //} } } /******************************************************************************/ __global__ void dpotf2_smlpin_anywidth_kernel(int m, double *dA, int ldda, int localstep, int gbstep, magma_int_t *dinfo) { #pragma unroll for(int i = 0; i < m; i+= POTF2_NB){ int ib = min(m-i, POTF2_NB); //if(threadIdx.x < m-i){ dpotf2_smlpout_anywidth_device(m-i, ib, A(localstep+i, 0), A(localstep+i, localstep+i), ldda, localstep+i, gbstep, dinfo); //} } } /******************************************************************************/ __global__ void dpotf2_smlpin_fixwidth_kernel_batched(int m, double **dA_array, int ai, int aj, int lda, int localstep, int gbstep, magma_int_t *info_array, const int batchCount) { const int batchid = blockIdx.z * blockDim.y + threadIdx.y; double *dA = dA_array[batchid] + aj * lda + ai; if (batchid >= batchCount) return; #pragma unroll for(int i = 0; i < m; i+= POTF2_NB){ //if(threadIdx.x < m-i){ dpotf2_smlpout_fixwidth_device(m-i, dA+localstep+i, dA+localstep+i+(localstep+i)*lda, lda, localstep+i, gbstep, &(info_array[batchid])); //} } } /******************************************************************************/ __global__ void dpotf2_smlpin_anywidth_kernel_batched(int m, double **dA_array, int ai, int aj, int lda, int localstep, int gbstep, magma_int_t *info_array, const int batchCount) { const int batchid = blockIdx.z * blockDim.y + threadIdx.y; double *dA = dA_array[batchid] + aj * lda + ai; if (batchid >= batchCount) return; #pragma unroll for(int i = 0; i < m; i+= POTF2_NB){ int ib = min(m-i, POTF2_NB); //if(threadIdx.x < m-i){ dpotf2_smlpout_anywidth_device(m-i, ib, dA+localstep+i, dA+localstep+i+(localstep+i)*lda, lda, localstep+i, gbstep, &(info_array[batchid])); //} } } /******************************************************************************/ __global__ void dpotf2_smlpout_fixwidth_kernel(int m, double *dA, int lda, int localstep, int gbstep, magma_int_t *dinfo) { dpotf2_smlpout_fixwidth_device(m, dA+localstep, dA+localstep+localstep*lda, lda, localstep, gbstep, dinfo ); } /******************************************************************************/ __global__ void dpotf2_smlpout_anywidth_kernel(int m, int n, double *dA, int lda, int localstep, int gbstep, magma_int_t *dinfo) { dpotf2_smlpout_anywidth_device(m, n, dA+localstep, dA+localstep+localstep*lda, lda, localstep, gbstep, dinfo ); } 
/******************************************************************************/ __global__ void dpotf2_smlpout_fixwidth_kernel_batched(int m, double **dA_array, int ai, int aj, int lda, int localstep, int gbstep, magma_int_t *info_array, const int batchCount) { const int batchid = blockIdx.z * blockDim.y + threadIdx.y; double *dA = dA_array[batchid] + aj * lda + ai; if (batchid >= batchCount) return; dpotf2_smlpout_fixwidth_device(m, dA+localstep, dA+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid])); } /******************************************************************************/ __global__ void dpotf2_smlpout_anywidth_kernel_batched(int m, int n, double **dA_array, int ai, int aj, int lda, int localstep, int gbstep, magma_int_t *info_array, const int batchCount) { const int batchid = blockIdx.z * blockDim.y + threadIdx.y; double *dA = dA_array[batchid] + aj * lda + ai; if (batchid >= batchCount) return; dpotf2_smlpout_anywidth_device(m, n, dA+localstep, dA+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid])); } /******************************************************************************/ extern "C" magma_int_t magma_dpotrf_lpout_batched( magma_uplo_t uplo, magma_int_t n, double **dA_array, magma_int_t ai, magma_int_t aj, magma_int_t lda, magma_int_t gbstep, magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t m = n; magma_int_t arginfo = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { arginfo = -1; } else if (m < 0 || n < 0 ) { arginfo = -2; } else if (lda < max(1,m)) { arginfo = -4; } else if (m < n) { arginfo = -10; } if (uplo == MagmaUpper) { fprintf( stderr, "%s: uplo=upper is not yet implemented\n", __func__ ); arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } // Quick return if possible if (m == 0 || n == 0) { return arginfo; } magma_int_t roundup_m = m; // rounding up need more investigation since it coul dmodify the matrix out of its bound //magma_int_t m8 = magma_roundup( m, 8 ); //magma_int_t roundup_m = m8 > lda ? m : m8; //magma_int_t m32 = magma_roundup( m, 32 ); //magma_int_t roundup_m = m32 > lda ? 
m : m32; magma_int_t ib, rows; for (magma_int_t j = 0; j < n; j += POTF2_NB) { ib = min(POTF2_NB, n-j); rows = roundup_m-j; // tuning ntcol magma_int_t ntcol; // for z precision, the best tuning is at NTCOL = 1 for all sizes if (rows > 64) ntcol = 1; else if (rows > 32) ntcol = NTCOL2; else ntcol = NTCOL1; // end of tuning ntcol const magma_int_t nTB = magma_ceildiv( batchCount, ntcol ); dim3 dimGrid(1, 1, nTB); magma_int_t nbth = rows; magma_int_t shared_mem_size = ntcol * (sizeof(double)*(nbth+POTF2_NB)*POTF2_NB); dim3 threads(nbth, ntcol); if (shared_mem_size > 47000) { arginfo = -33; magma_xerbla( __func__, -(arginfo) ); return arginfo; } if (ib == POTF2_NB) { dpotf2_smlpout_fixwidth_kernel_batched <<< dimGrid, threads, shared_mem_size, queue->cuda_stream() >>> (rows, dA_array, ai, aj, lda, j, gbstep, info_array, batchCount); } else { dpotf2_smlpout_anywidth_kernel_batched <<< dimGrid, threads, shared_mem_size, queue->cuda_stream() >>> (rows, ib, dA_array, ai, aj, lda, j, gbstep, info_array, batchCount); } } return arginfo; } /******************************************************************************/ extern "C" magma_int_t magma_dpotrf_lpin_batched( magma_uplo_t uplo, magma_int_t n, double **dA_array, magma_int_t ai, magma_int_t aj, magma_int_t lda, magma_int_t gbstep, magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t m = n; magma_int_t arginfo = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { arginfo = -1; } else if (m < 0 || n < 0 ) { arginfo = -2; } else if (lda < max(1,m)) { arginfo = -4; } else if (m < n) { arginfo = -10; } if (uplo == MagmaUpper) { fprintf( stderr, "%s: uplo=upper is not yet implemented\n", __func__ ); arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } // Quick return if possible if (m == 0 || n == 0) { return arginfo; } dim3 grid(1, 1, batchCount); dim3 threads(n, 1, 1); magma_int_t shared_mem_size = sizeof(double) * (n+POTF2_NB)*POTF2_NB; if (shared_mem_size > 47000) { arginfo = -33; magma_xerbla( __func__, -(arginfo) ); return arginfo; } if( n % POTF2_NB == 0){ dpotf2_smlpin_fixwidth_kernel_batched <<< grid, threads, shared_mem_size, queue->cuda_stream() >>> (n, dA_array, ai, aj, lda, 0, gbstep, info_array, batchCount); } else{ dpotf2_smlpin_anywidth_kernel_batched <<< grid, threads, shared_mem_size, queue->cuda_stream() >>> (n, dA_array, ai, aj, lda, 0, gbstep, info_array, batchCount); } return arginfo; } /******************************************************************************/ extern "C" magma_int_t magma_dpotf2_lpout( magma_uplo_t uplo, magma_int_t n, double *dA, magma_int_t lda, magma_int_t gbstep, magma_int_t *dinfo, magma_queue_t queue) { magma_int_t m = n; magma_int_t arginfo = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { arginfo = -1; } else if (m < 0 || n < 0 ) { arginfo = -2; } else if (lda < max(1,m)) { arginfo = -4; } else if (m < n) { arginfo = -10; } if (uplo == MagmaUpper) { fprintf( stderr, "%s: uplo=upper is not yet implemented\n", __func__ ); arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } // Quick return if possible if (m == 0 || n == 0) { return arginfo; } magma_int_t roundup_m = m; // rounding up need more investigation since it coul dmodify the matrix out of its bound //magma_int_t m8 = magma_roundup( m, 8 ); //magma_int_t roundup_m = m8 > lda ? m : m8; //magma_int_t m32 = magma_roundup( m, 32 ); //magma_int_t roundup_m = m32 > lda ? 
m : m32; magma_int_t ib, rows; for (magma_int_t j = 0; j < n; j += POTF2_NB) { ib = min(POTF2_NB, n-j); rows = roundup_m-j; dim3 dimGrid(1, 1, 1); magma_int_t nbth = rows; magma_int_t shared_mem_size = sizeof(double)*(nbth+POTF2_NB)*POTF2_NB; dim3 threads(nbth, 1, 1); if (shared_mem_size > 47000) { arginfo = -33; magma_xerbla( __func__, -(arginfo) ); return arginfo; } if (ib == POTF2_NB) { dpotf2_smlpout_fixwidth_kernel <<< dimGrid, threads, shared_mem_size, queue->cuda_stream() >>> (rows, dA, lda, j, gbstep, dinfo ); } else { dpotf2_smlpout_anywidth_kernel <<< dimGrid, threads, shared_mem_size, queue->cuda_stream() >>> (rows, ib, dA, lda, j, gbstep, dinfo ); } } return arginfo; } /******************************************************************************/ extern "C" magma_int_t magma_dpotf2_lpin( magma_uplo_t uplo, magma_int_t n, double *dA, magma_int_t ldda, magma_int_t gbstep, magma_int_t *dinfo, magma_queue_t queue) { magma_int_t arginfo = 0; // Quick return if possible if ( n == 0 ) { return arginfo; } dim3 grid(1, 1, 1); dim3 threads(n, 1, 1); magma_int_t shared_mem_size = sizeof(double) * (n+POTF2_NB)*POTF2_NB; if (shared_mem_size > 47000) { arginfo = -33; magma_xerbla( __func__, -(arginfo) ); return arginfo; } if( n % POTF2_NB == 0){ dpotf2_smlpin_fixwidth_kernel <<< grid, threads, shared_mem_size, queue->cuda_stream() >>> (n, dA, ldda, 0, gbstep, dinfo); } else{ dpotf2_smlpin_anywidth_kernel <<< grid, threads, shared_mem_size, queue->cuda_stream() >>> (n, dA, ldda, 0, gbstep, dinfo); } return arginfo; }
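The batched launchers above pack ntcol matrices into a single thread block (threadIdx.y selects the matrix, blockIdx.z covers the remaining batch entries), which is why every *_kernel_batched body recomputes batchid and returns early once it exceeds batchCount. The standalone sketch below is not MAGMA code; it only illustrates that batch-to-thread mapping and the same conservative 47 KB static shared-memory guard applied before each launch, with all names chosen for the example.

#include <cstdio>
#include <cuda_runtime.h>

// One thread block handles blockDim.y batch entries; blockIdx.z covers the rest.
__global__ void batch_id_kernel(int *out, int batchCount)
{
    const int batchid = blockIdx.z * blockDim.y + threadIdx.y;
    if (batchid >= batchCount) return;            // last z-block may be partially filled
    if (threadIdx.x == 0) out[batchid] = batchid; // one write per batch entry
}

int main()
{
    const int batchCount = 1000;  // number of small matrices in the batch
    const int nbth = 32;          // threads along x (rows of one matrix)
    const int ntcol = 4;          // matrices packed into one thread block
    const int nTB = (batchCount + ntcol - 1) / ntcol;  // ceildiv, as in the launcher
    dim3 grid(1, 1, nTB);
    dim3 threads(nbth, ntcol);

    // Same guard style as above: refuse to launch if the requested static shared
    // memory would exceed the conservative 47 KB budget (this toy kernel needs none).
    size_t shared_mem_size = 0;
    if (shared_mem_size > 47000) return 1;

    int *d_out;
    cudaMalloc((void **)&d_out, batchCount * sizeof(int));
    batch_id_kernel<<<grid, threads, shared_mem_size>>>(d_out, batchCount);
    cudaDeviceSynchronize();

    int first = -1, last = -1;
    cudaMemcpy(&first, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&last, d_out + batchCount - 1, sizeof(int), cudaMemcpyDeviceToHost);
    printf("first=%d last=%d\n", first, last);    // expect 0 and 999
    cudaFree(d_out);
    return 0;
}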
8bb48a4d8e24905b97b654f1a366364478c5d63c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dali/aux/optical_flow/turing_of/optical_flow_turing.h" #include "dali/error_handling.h" namespace dali { namespace optical_flow { namespace kernel { namespace { constexpr size_t kBlockSize = 32; /** * Calculating number of blocks * @param length In bytes * @param block_size * @return */ inline size_t num_blocks(size_t length, size_t block_size) { // Calculating ceil for ints return (length + block_size - 1) / block_size; } /** * Access a value at given (x, y) coordinates in a strided 2D array * @param buffer * @param x In pixels * @param y In pixels * @param pitch_bytes Offset, in bytes, between consecutive rows of the array * @return Value at given coordinates */ template<typename T> __host__ __device__ constexpr T & pitch_xy(T *buffer, ptrdiff_t x, ptrdiff_t y, ptrdiff_t pitch_bytes) { return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(buffer) + pitch_bytes * y)[x]; } } // namespace __global__ void DecodeFlowComponentKernel(const int16_t *input, float *output, size_t pitch, size_t width_px, size_t height) { size_t x = threadIdx.x + blockIdx.x * blockDim.x; size_t y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= width_px || y >= height) return; auto value_in = pitch_xy(input, x, y, pitch); size_t outidx = x + width_px * y; output[outidx] = decode_flow_component(value_in); } __global__ void RgbToRgbaKernel(const uint8_t *input, uint8_t *output, size_t pitch, size_t width_px, size_t height) { constexpr size_t in_channels = 3, out_channels = 4; size_t x = threadIdx.x + blockIdx.x * blockDim.x; size_t y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= width_px || y >= height) return; size_t in_idx = in_channels * x + in_channels * width_px * y; size_t out_idx = out_channels * x + pitch * y; output[out_idx] = input[in_idx]; output[out_idx + 1] = input[in_idx + 1]; output[out_idx + 2] = input[in_idx + 2]; output[out_idx + 3] = 255; } void RgbToRgba(const uint8_t *input, uint8_t *output, size_t pitch, size_t width_px, size_t height, hipStream_t stream) { constexpr int out_channels = 4; DALI_ENFORCE(pitch >= out_channels * width_px); dim3 block_dim(kBlockSize, kBlockSize); dim3 grid_dim(num_blocks(out_channels * width_px, block_dim.x), num_blocks(height, block_dim.y)); hipLaunchKernelGGL(( RgbToRgbaKernel), dim3(grid_dim), dim3(block_dim), 0, stream, input, output, pitch, width_px, height); } void DecodeFlowComponents(const int16_t *input, float *output, size_t pitch, size_t width_px, size_t height, hipStream_t stream) { DALI_ENFORCE(pitch >= 2 * sizeof(float) * width_px); dim3 block_dim(kBlockSize, kBlockSize); dim3 grid_dim(num_blocks(sizeof(float) * width_px, block_dim.x), num_blocks(height, block_dim.y)); hipLaunchKernelGGL(( DecodeFlowComponentKernel), dim3(grid_dim), dim3(block_dim), 0, stream, input, output, pitch, sizeof(int16_t) * width_px, height); } } // namespace 
kernel
}  // namespace optical_flow
}  // namespace dali
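Both halves of this pair address the strided 2D input through pitch_xy, where consecutive rows are pitch_bytes apart and that stride may be larger than the row width in elements times sizeof(T), so the row offset has to be computed in bytes before indexing the column. The host-only sketch below is illustrative rather than DALI code; it reproduces the same byte-based arithmetic so the indexing can be sanity-checked without a GPU.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Same addressing scheme as pitch_xy above: jump y rows of pitch_bytes each,
// then index column x in units of T.
template <typename T>
T &pitch_xy(T *buffer, std::ptrdiff_t x, std::ptrdiff_t y, std::ptrdiff_t pitch_bytes) {
  return reinterpret_cast<T *>(reinterpret_cast<std::uintptr_t>(buffer) + pitch_bytes * y)[x];
}

int main() {
  const int height = 3;
  const std::ptrdiff_t pitch_bytes = 32;   // padded row stride: 32 > 5 elements * sizeof(int16_t)
  std::vector<std::uint8_t> raw(pitch_bytes * height, 0);
  auto *base = reinterpret_cast<std::int16_t *>(raw.data());

  // Write element (x=4, y=2) through the pitched accessor, then read it back directly.
  pitch_xy(base, 4, 2, pitch_bytes) = 42;
  const auto *row2 = reinterpret_cast<const std::int16_t *>(raw.data() + 2 * pitch_bytes);
  printf("row2[4] = %d\n", row2[4]);       // prints 42
  return 0;
}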
8bb48a4d8e24905b97b654f1a366364478c5d63c.cu
// Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dali/aux/optical_flow/turing_of/optical_flow_turing.h" #include "dali/error_handling.h" namespace dali { namespace optical_flow { namespace kernel { namespace { constexpr size_t kBlockSize = 32; /** * Calculating number of blocks * @param length In bytes * @param block_size * @return */ inline size_t num_blocks(size_t length, size_t block_size) { // Calculating ceil for ints return (length + block_size - 1) / block_size; } /** * Access a value at given (x, y) coordinates in a strided 2D array * @param buffer * @param x In pixels * @param y In pixels * @param pitch_bytes Offset, in bytes, between consecutive rows of the array * @return Value at given coordinates */ template<typename T> __host__ __device__ constexpr T & pitch_xy(T *buffer, ptrdiff_t x, ptrdiff_t y, ptrdiff_t pitch_bytes) { return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(buffer) + pitch_bytes * y)[x]; } } // namespace __global__ void DecodeFlowComponentKernel(const int16_t *input, float *output, size_t pitch, size_t width_px, size_t height) { size_t x = threadIdx.x + blockIdx.x * blockDim.x; size_t y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= width_px || y >= height) return; auto value_in = pitch_xy(input, x, y, pitch); size_t outidx = x + width_px * y; output[outidx] = decode_flow_component(value_in); } __global__ void RgbToRgbaKernel(const uint8_t *input, uint8_t *output, size_t pitch, size_t width_px, size_t height) { constexpr size_t in_channels = 3, out_channels = 4; size_t x = threadIdx.x + blockIdx.x * blockDim.x; size_t y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= width_px || y >= height) return; size_t in_idx = in_channels * x + in_channels * width_px * y; size_t out_idx = out_channels * x + pitch * y; output[out_idx] = input[in_idx]; output[out_idx + 1] = input[in_idx + 1]; output[out_idx + 2] = input[in_idx + 2]; output[out_idx + 3] = 255; } void RgbToRgba(const uint8_t *input, uint8_t *output, size_t pitch, size_t width_px, size_t height, cudaStream_t stream) { constexpr int out_channels = 4; DALI_ENFORCE(pitch >= out_channels * width_px); dim3 block_dim(kBlockSize, kBlockSize); dim3 grid_dim(num_blocks(out_channels * width_px, block_dim.x), num_blocks(height, block_dim.y)); RgbToRgbaKernel<<<grid_dim, block_dim, 0, stream>>>(input, output, pitch, width_px, height); } void DecodeFlowComponents(const int16_t *input, float *output, size_t pitch, size_t width_px, size_t height, cudaStream_t stream) { DALI_ENFORCE(pitch >= 2 * sizeof(float) * width_px); dim3 block_dim(kBlockSize, kBlockSize); dim3 grid_dim(num_blocks(sizeof(float) * width_px, block_dim.x), num_blocks(height, block_dim.y)); DecodeFlowComponentKernel<<<grid_dim, block_dim, 0, stream>>> (input, output, pitch, sizeof(int16_t) * width_px, height); } } // namespace kernel } // namespace optical_flow } // namespace dali
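Comparing this .cu file with the .hip version above, the only substantive differences are the runtime header, the cudaStream_t/hipStream_t type, and the launch syntax: hipify rewrites the CUDA triple-chevron launch into the hipLaunchKernelGGL macro. The sketch below is a minimal illustration of that translation under an assumed trivial fill kernel, guarded on __HIPCC__ so either toolchain can build it; none of the names are DALI APIs.

#ifdef __HIPCC__
#include <hip/hip_runtime.h>
using stream_t = hipStream_t;
#else
#include <cuda_runtime.h>
using stream_t = cudaStream_t;
#endif

__global__ void FillKernel(int *out, int value, size_t n) {
  size_t i = threadIdx.x + blockIdx.x * static_cast<size_t>(blockDim.x);
  if (i < n) out[i] = value;
}

void LaunchFill(int *out, int value, size_t n, stream_t stream) {
  dim3 block(256);
  dim3 grid(static_cast<unsigned>((n + block.x - 1) / block.x));
#ifdef __HIPCC__
  // Hipified form, as emitted in the .hip file of this pair.
  hipLaunchKernelGGL(FillKernel, grid, block, 0, stream, out, value, n);
#else
  // Original CUDA form, as written in the .cu file of this pair.
  FillKernel<<<grid, block, 0, stream>>>(out, value, n);
#endif
}

int main() {
  const size_t n = 1024;
  int *d_out = nullptr;
#ifdef __HIPCC__
  hipMalloc((void **)&d_out, n * sizeof(int));
  LaunchFill(d_out, 7, n, nullptr);
  hipDeviceSynchronize();
  hipFree(d_out);
#else
  cudaMalloc((void **)&d_out, n * sizeof(int));
  LaunchFill(d_out, 7, n, nullptr);
  cudaDeviceSynchronize();
  cudaFree(d_out);
#endif
  return 0;
}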
4f68047d9aaaa4ee56333b0ccc11a9839c2f909f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. 
*/ #include <cuml/neighbors/knn_mg.hpp> #include <selection/knn.cuh> #include <common/cumlHandle.hpp> #include <common/cuml_comms_int.hpp> #include <common/device_buffer.hpp> #include <cuml/common/cuml_allocator.hpp> #include <set> #include <cuda_utils.cuh> namespace ML { namespace KNN { namespace opg { namespace knn_common { template <typename T, int TPB_X> __global__ void copy_outputs_kernel(T *out, int64_t *knn_indices, T **parts, int64_t *offsets, size_t cur_batch_size, int n_parts, int n_labels) { int64_t i = (blockIdx.x * TPB_X) + threadIdx.x; if (i >= n_labels) return; int64_t nn_idx = knn_indices[i]; int part_idx = 0; for (; part_idx < n_parts && nn_idx >= offsets[part_idx]; part_idx++) ; part_idx = min(max((int)0, part_idx - 1), n_parts - 1); int64_t offset = nn_idx - offsets[part_idx]; out[i] = parts[part_idx][offset]; } template <typename T> void copy_outputs(T *out, int64_t *knn_indices, std::vector<std::vector<T *>> &y, size_t cur_batch_size, int k, int n_outputs, int n_features, int my_rank, std::vector<Matrix::RankSizePair *> &idxPartsToRanks, std::shared_ptr<deviceAllocator> alloc, hipStream_t stream) { const int TPB_X = 256; int n_labels = cur_batch_size * k; dim3 grid(MLCommon::ceildiv(n_labels, TPB_X)); dim3 blk(TPB_X); int64_t offset = 0; std::vector<int64_t> offsets_h; for (auto &rsp : idxPartsToRanks) { if (rsp->rank == my_rank) { offsets_h.push_back(offset); } offset += rsp->size; } size_t n_parts = offsets_h.size(); device_buffer<int64_t> offsets_d(alloc, stream, n_parts); updateDevice(offsets_d.data(), offsets_h.data(), n_parts, stream); std::vector<T *> parts_h(n_parts); device_buffer<T *> parts_d(alloc, stream, n_parts); for (int o = 0; o < n_outputs; o++) { for (int p = 0; p < n_parts; p++) { parts_h[p] = y[p][o]; } updateDevice(parts_d.data(), parts_h.data(), n_parts, stream); hipLaunchKernelGGL(( copy_outputs_kernel<T, TPB_X>), dim3(grid), dim3(blk), 0, stream, out + (o * n_labels), knn_indices, parts_d.data(), offsets_d.data(), cur_batch_size, n_parts, n_labels); } } template <typename T> void launch_local_operation(T *out, int64_t *knn_indices, std::vector<T *> y, size_t total_labels, size_t cur_batch_size, int k, const std::shared_ptr<deviceAllocator> alloc, hipStream_t stream, hipStream_t *int_streams, int n_int_streams, bool probas_only, std::vector<float *> *probas, std::vector<int *> *uniq_labels, std::vector<int> *n_unique); template <> void launch_local_operation<int>( int *out, int64_t *knn_indices, std::vector<int *> y, size_t total_labels, size_t cur_batch_size, int k, const std::shared_ptr<deviceAllocator> alloc, hipStream_t stream, hipStream_t *int_streams, int n_int_streams, bool probas_only, std::vector<float *> *probas, std::vector<int *> *uniq_labels, std::vector<int> *n_unique) { if (probas_only) { MLCommon::Selection::class_probs<32, true>( *probas, nullptr, y, total_labels, cur_batch_size, k, *uniq_labels, *n_unique, alloc, stream, &int_streams[0], n_int_streams); } else { MLCommon::Selection::knn_classify<32, true>( out, nullptr, y, total_labels, cur_batch_size, k, *uniq_labels, *n_unique, alloc, stream, &int_streams[0], n_int_streams); } } template <> void launch_local_operation<float>( float *out, int64_t *knn_indices, std::vector<float *> y, size_t total_labels, size_t cur_batch_size, int k, const std::shared_ptr<deviceAllocator> alloc, hipStream_t stream, hipStream_t *int_streams, int n_int_streams, bool probas_only, std::vector<float *> *probas, std::vector<int *> *uniq_labels, std::vector<int> *n_unique) { 
MLCommon::Selection::knn_regress<float, 32, true>( out, nullptr, y, total_labels, cur_batch_size, k, stream, &int_streams[0], n_int_streams); } template <typename T> void perform_local_operation(T *out, int64_t *knn_indices, T *labels, size_t cur_batch_size, int k, int n_outputs, ML::cumlHandle &h, bool probas_only = false, std::vector<float *> *probas = nullptr, std::vector<int *> *uniq_labels = nullptr, std::vector<int> *n_unique = nullptr) { size_t n_labels = cur_batch_size * k; size_t total_labels = n_outputs * n_labels; std::vector<T *> y(n_outputs); for (int o = 0; o < n_outputs; o++) { y[o] = labels + (o * n_labels); } hipStream_t stream = h.getStream(); const std::shared_ptr<deviceAllocator> alloc = h.getDeviceAllocator(); int n_int_streams = h.getImpl().getNumInternalStreams(); hipStream_t int_streams[n_int_streams]; for (int i = 0; i < n_int_streams; i++) { int_streams[i] = h.getImpl().getInternalStream(i); } launch_local_operation<T>(out, knn_indices, y, total_labels, cur_batch_size, k, alloc, stream, int_streams, n_int_streams, probas_only, probas, uniq_labels, n_unique); } template <typename T> void reduce(ML::cumlHandle &handle, std::vector<Matrix::Data<T> *> *out, std::vector<Matrix::Data<int64_t> *> *out_I, std::vector<Matrix::floatData_t *> *out_D, device_buffer<T> &res, device_buffer<int64_t> &res_I, device_buffer<float> &res_D, Matrix::PartDescriptor &index_desc, size_t cur_batch_size, int k, int n_outputs, int local_parts_completed, int cur_batch, size_t total_n_processed, std::set<int> idxRanks, int my_rank, bool probas_only = false, std::vector<std::vector<float *>> *probas = nullptr, std::vector<int *> *uniq_labels = nullptr, std::vector<int> *n_unique = nullptr) { const ML::cumlHandle_impl &h = handle.getImpl(); hipStream_t stream = h.getStream(); const std::shared_ptr<deviceAllocator> alloc = h.getDeviceAllocator(); device_buffer<int64_t> trans(alloc, stream, idxRanks.size()); CUDA_CHECK(hipMemsetAsync(trans.data(), 0, idxRanks.size() * sizeof(int64_t), stream)); size_t batch_offset = total_n_processed * k; T *output = nullptr; int64_t *indices = nullptr; float *distances = nullptr; device_buffer<int64_t> *indices_b; device_buffer<float> *distances_b; std::vector<float *> probas_with_offsets; if (probas_only) { indices_b = new device_buffer<int64_t>(alloc, stream, cur_batch_size * k); distances_b = new device_buffer<float>(alloc, stream, cur_batch_size * k); indices = indices_b->data(); distances = distances_b->data(); auto &probas_part = probas->at(local_parts_completed); for (float *ptr : probas_part) { probas_with_offsets.push_back(ptr + batch_offset); } } else { output = out->at(local_parts_completed)->ptr + batch_offset; indices = out_I->at(local_parts_completed)->ptr + batch_offset; distances = out_D->at(local_parts_completed)->ptr + batch_offset; } MLCommon::Selection::knn_merge_parts(res_D.data(), res_I.data(), distances, indices, cur_batch_size, idxRanks.size(), k, stream, trans.data()); perform_local_operation(output, indices, res.data(), cur_batch_size, k, n_outputs, handle, probas_only, &probas_with_offsets, uniq_labels, n_unique); if (probas_only) { delete indices_b; delete distances_b; } } void perform_local_knn(int64_t *res_I, float *res_D, std::vector<Matrix::floatData_t *> &idx_data, Matrix::PartDescriptor &idx_desc, std::vector<Matrix::RankSizePair *> &local_idx_parts, std::vector<size_t> &start_indices, hipStream_t stream, hipStream_t *internal_streams, int n_internal_streams, std::shared_ptr<deviceAllocator> allocator, size_t cur_batch_size, int 
k, float *cur_query_ptr, bool rowMajorIndex, bool rowMajorQuery) { std::vector<float *> ptrs(idx_data.size()); std::vector<int> sizes(idx_data.size()); for (int cur_idx = 0; cur_idx < idx_data.size(); cur_idx++) { ptrs[cur_idx] = idx_data[cur_idx]->ptr; sizes[cur_idx] = local_idx_parts[cur_idx]->size; } // PartDescriptor uses size_t while FAISS uses int64_t // so we need to do a quick conversion. std::vector<int64_t> start_indices_long; for (size_t start_index : start_indices) start_indices_long.push_back((int64_t)start_index); // ID ranges need to be offset by each local partition's // starting indices. MLCommon::Selection::brute_force_knn( ptrs, sizes, (int)idx_desc.N, cur_query_ptr, (int)cur_batch_size, res_I, res_D, k, allocator, stream, internal_streams, n_internal_streams, rowMajorIndex, rowMajorQuery, &start_indices_long); } void broadcast_query(float *query, size_t batch_input_elms, int part_rank, std::set<int> idxRanks, const cumlCommunicator &comm, hipStream_t stream) { int my_rank = comm.getRank(); int request_idx = 0; std::vector<MLCommon::cumlCommunicator::request_t> requests; if (part_rank == my_rank) { int idx_rank_size = idxRanks.size(); if (idxRanks.find(my_rank) != idxRanks.end()) { --idx_rank_size; } requests.resize(idx_rank_size); for (int rank : idxRanks) { if (rank != my_rank) { comm.isend(query, batch_input_elms, rank, 0, requests.data() + request_idx); ++request_idx; } } } else { requests.resize(1); comm.irecv(query, batch_input_elms, part_rank, 0, requests.data() + request_idx); ++request_idx; } try { comm.waitall(requests.size(), requests.data()); } catch (Exception &e) { std::cout << "FAILURE!" << std::endl; } } /** * All non-root index ranks send the results for the current * query batch to the root rank for the batch. */ template <typename T> void exchange_results(device_buffer<T> &res, device_buffer<int64_t> &res_I, device_buffer<float> &res_D, const cumlCommunicator &comm, int part_rank, std::set<int> idxRanks, hipStream_t stream, size_t cur_batch_size, int k, int n_outputs, int local_parts_completed) { int my_rank = comm.getRank(); size_t batch_elms = cur_batch_size * k; int request_idx = 0; std::vector<MLCommon::cumlCommunicator::request_t> requests; if (part_rank != my_rank) { requests.resize(2 + n_outputs); comm.isend(res_I.data(), batch_elms, part_rank, 0, requests.data() + request_idx); ++request_idx; comm.isend(res_D.data(), batch_elms, part_rank, 0, requests.data() + request_idx); ++request_idx; for (size_t o = 0; o < n_outputs; o++) { comm.isend(res.data() + (o * batch_elms), batch_elms, part_rank, 0, requests.data() + request_idx); request_idx++; } } else { bool part_rank_is_idx = idxRanks.find(part_rank) != idxRanks.end(); int idx_rank_size = idxRanks.size(); int num_received = 0; // if root rank is an index, it will already have // query data, so no need to receive from it. 
res.resize(batch_elms * n_outputs * idx_rank_size, stream); res_I.resize(batch_elms * idx_rank_size, stream); res_D.resize(batch_elms * idx_rank_size, stream); if (part_rank_is_idx) { num_received = 1; // root rank will take the zeroth slot --idx_rank_size; } requests.resize((2 + n_outputs) * idx_rank_size); for (int rank : idxRanks) { if (rank != my_rank) { size_t batch_offset = batch_elms * num_received; comm.irecv(res_I.data() + batch_offset, batch_elms, rank, 0, requests.data() + request_idx); ++request_idx; comm.irecv(res_D.data() + batch_offset, batch_elms, rank, 0, requests.data() + request_idx); ++request_idx; for (size_t o = 0; o < n_outputs; o++) { T *r = res.data() + (o * idxRanks.size() * batch_elms) + batch_offset; comm.irecv(r, batch_elms, rank, 0, requests.data() + request_idx); ++request_idx; } ++num_received; } } } try { comm.waitall(requests.size(), requests.data()); } catch (Exception &e) { std::cout << "FAILURE!" << std::endl; } } template <typename T> void opg_knn(ML::cumlHandle &handle, std::vector<Matrix::Data<T> *> *out, std::vector<Matrix::Data<int64_t> *> *out_I, std::vector<Matrix::floatData_t *> *out_D, std::vector<Matrix::floatData_t *> &idx_data, Matrix::PartDescriptor &idx_desc, std::vector<Matrix::floatData_t *> &query_data, Matrix::PartDescriptor &query_desc, std::vector<std::vector<T *>> &y, bool rowMajorIndex, bool rowMajorQuery, int k, int n_outputs, size_t batch_size, bool verbose, std::vector<std::vector<float *>> *probas = nullptr, std::vector<int *> *uniq_labels = nullptr, std::vector<int> *n_unique = nullptr, bool probas_only = false) { ASSERT(k <= 1024, "k must be <= 1024"); ASSERT(batch_size > 0, "max_batch_size must be > 0"); ASSERT(k < idx_desc.M, "k must be less than the total number of query rows"); for (Matrix::RankSizePair *rsp : idx_desc.partsToRanks) { ASSERT(rsp->size >= k, "k must be <= the number of rows in the smallest index partition."); } const ML::cumlHandle_impl &h = handle.getImpl(); const cumlCommunicator &comm = h.getCommunicator(); hipStream_t stream = h.getStream(); const std::shared_ptr<deviceAllocator> allocator = h.getDeviceAllocator(); int my_rank = comm.getRank(); std::set<int> idxRanks = idx_desc.uniqueRanks(); std::vector<Matrix::RankSizePair *> local_idx_parts = idx_desc.blocksOwnedBy(comm.getRank()); int local_parts_completed = 0; // Loop through query parts for all ranks for (int i = 0; i < query_desc.totalBlocks(); i++) { Matrix::RankSizePair *partition = query_desc.partsToRanks[i]; int part_rank = partition->rank; size_t part_n_rows = partition->size; size_t total_batches = ceildiv(part_n_rows, batch_size); size_t total_n_processed = 0; // Loop through batches for each query part for (int cur_batch = 0; cur_batch < total_batches; cur_batch++) { size_t cur_batch_size = batch_size; if (cur_batch == total_batches - 1) cur_batch_size = part_n_rows - (cur_batch * batch_size); if (my_rank == part_rank && verbose) { std::cout << "Root Rank is " << my_rank << std::endl; } /** * Root broadcasts batch to all other ranks */ if (verbose) { std::cout << "Rank " << my_rank << ": Performing Broadcast" << std::endl; } int my_rank = comm.getRank(); device_buffer<float> part_data(allocator, stream, 0); size_t batch_input_elms = cur_batch_size * query_desc.N; size_t batch_input_offset = batch_input_elms * cur_batch; float *cur_query_ptr; device_buffer<float> tmp_batch_buf(allocator, stream, 0); // current partition's owner rank broadcasts if (part_rank == my_rank) { Matrix::Data<float> *data = query_data[local_parts_completed]; // If 
query is column major and total_batches > 0, create a // temporary buffer for the batch so that we can stack rows. if (!rowMajorQuery && total_batches > 1) { tmp_batch_buf.resize(batch_input_elms, stream); for (int col_data = 0; col_data < query_desc.N; col_data++) { copy(tmp_batch_buf.data() + (col_data * cur_batch_size), data->ptr + ((col_data * part_n_rows) + total_n_processed), cur_batch_size, stream); } cur_query_ptr = tmp_batch_buf.data(); } else { cur_query_ptr = data->ptr + batch_input_offset; } // all other (index) ranks receive } else if (idxRanks.find(my_rank) != idxRanks.end()) { part_data.resize(batch_input_elms, stream); cur_query_ptr = part_data.data(); } bool my_rank_is_idx = idxRanks.find(my_rank) != idxRanks.end(); /** * Send query to index partitions */ if (my_rank == part_rank || my_rank_is_idx) broadcast_query(cur_query_ptr, batch_input_elms, part_rank, idxRanks, comm, stream); device_buffer<T> res(allocator, stream); device_buffer<int64_t> res_I(allocator, stream); device_buffer<float> res_D(allocator, stream); if (my_rank_is_idx) { /** * All index ranks perform local KNN */ if (verbose) std::cout << "Rank " << my_rank << ": Performing Local KNN" << std::endl; size_t batch_knn_elms = k * cur_batch_size; res.resize(batch_knn_elms * n_outputs, stream); res_I.resize(batch_knn_elms, stream); res_D.resize(batch_knn_elms, stream); // Offset nearest neighbor index matrix by partition indices std::vector<size_t> start_indices = idx_desc.startIndices(my_rank); hipStream_t int_streams[handle.getImpl().getNumInternalStreams()]; for (int i = 0; i < handle.getImpl().getNumInternalStreams(); i++) { int_streams[i] = handle.getImpl().getInternalStream(i); } perform_local_knn(res_I.data(), res_D.data(), idx_data, idx_desc, local_idx_parts, start_indices, stream, int_streams, handle.getNumInternalStreams(), handle.getDeviceAllocator(), cur_batch_size, k, cur_query_ptr, rowMajorIndex, rowMajorQuery); // Synchronize before running labels copy CUDA_CHECK(hipStreamSynchronize(stream)); copy_outputs(res.data(), res_I.data(), y, (size_t)cur_batch_size, (int)k, (int)n_outputs, (int)idx_desc.N, my_rank, idx_desc.partsToRanks, handle.getDeviceAllocator(), stream); // Synchronize before sending CUDA_CHECK(hipStreamSynchronize(stream)); } /** * Ranks exchange results. * Partition owner receives. All other ranks send. 
*/ if (verbose) std::cout << "Rank " << my_rank << ": Exchanging results" << std::endl; exchange_results(res, res_I, res_D, comm, part_rank, idxRanks, stream, cur_batch_size, k, n_outputs, local_parts_completed); /** * Root rank performs local reduce */ if (part_rank == my_rank) { if (verbose) std::cout << "Rank " << my_rank << ": Performing Reduce" << std::endl; reduce(handle, out, out_I, out_D, res, res_I, res_D, idx_desc, cur_batch_size, k, n_outputs, local_parts_completed, cur_batch, total_n_processed, idxRanks, my_rank, probas_only, probas, uniq_labels, n_unique); CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipPeekAtLastError()); if (verbose) std::cout << "Rank " << my_rank << ": Finished Reduce" << std::endl; } total_n_processed += cur_batch_size; } if (my_rank == part_rank) local_parts_completed++; } }; template void opg_knn<int>(ML::cumlHandle &handle, std::vector<Matrix::Data<int> *> *out, std::vector<Matrix::Data<int64_t> *> *out_I, std::vector<Matrix::floatData_t *> *out_D, std::vector<Matrix::floatData_t *> &idx_data, Matrix::PartDescriptor &idx_desc, std::vector<Matrix::floatData_t *> &query_data, Matrix::PartDescriptor &query_desc, std::vector<std::vector<int *>> &y, bool rowMajorIndex, bool rowMajorQuery, int k, int n_outputs, size_t batch_size, bool verbose, std::vector<std::vector<float *>> *probas, std::vector<int *> *uniq_labels, std::vector<int> *n_unique, bool probas_only); template void opg_knn<float>(ML::cumlHandle &handle, std::vector<Matrix::Data<float> *> *out, std::vector<Matrix::Data<int64_t> *> *out_I, std::vector<Matrix::floatData_t *> *out_D, std::vector<Matrix::floatData_t *> &idx_data, Matrix::PartDescriptor &idx_desc, std::vector<Matrix::floatData_t *> &query_data, Matrix::PartDescriptor &query_desc, std::vector<std::vector<float *>> &y, bool rowMajorIndex, bool rowMajorQuery, int k, int n_outputs, size_t batch_size, bool verbose, std::vector<std::vector<float *>> *probas, std::vector<int *> *uniq_labels, std::vector<int> *n_unique, bool probas_only); template void reduce<int>( ML::cumlHandle &handle, std::vector<Matrix::Data<int> *> *out, std::vector<Matrix::Data<int64_t> *> *out_I, std::vector<Matrix::floatData_t *> *out_D, device_buffer<int> &res, device_buffer<int64_t> &res_I, device_buffer<float> &res_D, Matrix::PartDescriptor &index_desc, size_t cur_batch_size, int k, int n_outputs, int local_parts_completed, int cur_batch, size_t total_n_processed, std::set<int> idxRanks, int my_rank, bool probas_only, std::vector<std::vector<float *>> *probas, std::vector<int *> *uniq_labels, std::vector<int> *n_unique); template void reduce<float>( ML::cumlHandle &handle, std::vector<Matrix::Data<float> *> *out, std::vector<Matrix::Data<int64_t> *> *out_I, std::vector<Matrix::floatData_t *> *out_D, device_buffer<float> &res, device_buffer<int64_t> &res_I, device_buffer<float> &res_D, Matrix::PartDescriptor &index_desc, size_t cur_batch_size, int k, int n_outputs, int local_parts_completed, int cur_batch, size_t total_n_processed, std::set<int> idxRanks, int my_rank, bool probas_only, std::vector<std::vector<float *>> *probas, std::vector<int *> *uniq_labels, std::vector<int> *n_unique); template void exchange_results<int>(device_buffer<int> &res, device_buffer<int64_t> &res_I, device_buffer<float> &res_D, const cumlCommunicator &comm, int part_rank, std::set<int> idxRanks, hipStream_t stream, size_t cur_batch_size, int k, int n_outputs, int local_parts_completed); template void exchange_results<float>( device_buffer<float> &res, device_buffer<int64_t> 
&res_I, device_buffer<float> &res_D, const cumlCommunicator &comm, int part_rank, std::set<int> idxRanks, hipStream_t stream, size_t cur_batch_size, int k, int n_outputs, int local_parts_completed); template void copy_outputs<int>( int *out, int64_t *knn_indices, std::vector<std::vector<int *>> &y, size_t cur_batch_size, int k, int n_outputs, int n_features, int my_rank, std::vector<Matrix::RankSizePair *> &idxPartsToRanks, std::shared_ptr<deviceAllocator> alloc, hipStream_t stream); template void copy_outputs<float>( float *out, int64_t *knn_indices, std::vector<std::vector<float *>> &y, size_t cur_batch_size, int k, int n_outputs, int n_features, int my_rank, std::vector<Matrix::RankSizePair *> &idxPartsToRanks, std::shared_ptr<deviceAllocator> alloc, hipStream_t stream); }; // namespace knn_common }; // namespace opg }; // namespace KNN }; // namespace ML
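copy_outputs_kernel in this file maps each global nearest-neighbor id back to its owning index partition by scanning the exclusive start offsets of the locally held partitions and then clamping part_idx. The host-side sketch below is illustrative only (not cuML code); it isolates that lookup so the clamping and local-offset arithmetic can be checked on a few sample indices.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

struct PartLoc { int part; std::int64_t offset; };

// Same scan-and-clamp logic as copy_outputs_kernel: offsets[i] is the global
// starting row of local partition i, in ascending order.
PartLoc locate(std::int64_t nn_idx, const std::vector<std::int64_t> &offsets) {
  const int n_parts = static_cast<int>(offsets.size());
  int part_idx = 0;
  for (; part_idx < n_parts && nn_idx >= offsets[part_idx]; part_idx++) {}
  part_idx = std::min(std::max(0, part_idx - 1), n_parts - 1);  // step back to the owner
  return {part_idx, nn_idx - offsets[part_idx]};
}

int main() {
  // Three local partitions starting at global rows 0, 100 and 250 (example values).
  const std::vector<std::int64_t> offsets = {0, 100, 250};
  for (std::int64_t idx : {5, 99, 100, 260}) {
    const PartLoc loc = locate(idx, offsets);
    printf("global %lld -> partition %d, local row %lld\n",
           static_cast<long long>(idx), loc.part, static_cast<long long>(loc.offset));
  }
  return 0;
}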
4f68047d9aaaa4ee56333b0ccc11a9839c2f909f.cu
/* * Copyright 1993-2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. 
*/ #include <cuml/neighbors/knn_mg.hpp> #include <selection/knn.cuh> #include <common/cumlHandle.hpp> #include <common/cuml_comms_int.hpp> #include <common/device_buffer.hpp> #include <cuml/common/cuml_allocator.hpp> #include <set> #include <cuda_utils.cuh> namespace ML { namespace KNN { namespace opg { namespace knn_common { template <typename T, int TPB_X> __global__ void copy_outputs_kernel(T *out, int64_t *knn_indices, T **parts, int64_t *offsets, size_t cur_batch_size, int n_parts, int n_labels) { int64_t i = (blockIdx.x * TPB_X) + threadIdx.x; if (i >= n_labels) return; int64_t nn_idx = knn_indices[i]; int part_idx = 0; for (; part_idx < n_parts && nn_idx >= offsets[part_idx]; part_idx++) ; part_idx = min(max((int)0, part_idx - 1), n_parts - 1); int64_t offset = nn_idx - offsets[part_idx]; out[i] = parts[part_idx][offset]; } template <typename T> void copy_outputs(T *out, int64_t *knn_indices, std::vector<std::vector<T *>> &y, size_t cur_batch_size, int k, int n_outputs, int n_features, int my_rank, std::vector<Matrix::RankSizePair *> &idxPartsToRanks, std::shared_ptr<deviceAllocator> alloc, cudaStream_t stream) { const int TPB_X = 256; int n_labels = cur_batch_size * k; dim3 grid(MLCommon::ceildiv(n_labels, TPB_X)); dim3 blk(TPB_X); int64_t offset = 0; std::vector<int64_t> offsets_h; for (auto &rsp : idxPartsToRanks) { if (rsp->rank == my_rank) { offsets_h.push_back(offset); } offset += rsp->size; } size_t n_parts = offsets_h.size(); device_buffer<int64_t> offsets_d(alloc, stream, n_parts); updateDevice(offsets_d.data(), offsets_h.data(), n_parts, stream); std::vector<T *> parts_h(n_parts); device_buffer<T *> parts_d(alloc, stream, n_parts); for (int o = 0; o < n_outputs; o++) { for (int p = 0; p < n_parts; p++) { parts_h[p] = y[p][o]; } updateDevice(parts_d.data(), parts_h.data(), n_parts, stream); copy_outputs_kernel<T, TPB_X><<<grid, blk, 0, stream>>>( out + (o * n_labels), knn_indices, parts_d.data(), offsets_d.data(), cur_batch_size, n_parts, n_labels); } } template <typename T> void launch_local_operation(T *out, int64_t *knn_indices, std::vector<T *> y, size_t total_labels, size_t cur_batch_size, int k, const std::shared_ptr<deviceAllocator> alloc, cudaStream_t stream, cudaStream_t *int_streams, int n_int_streams, bool probas_only, std::vector<float *> *probas, std::vector<int *> *uniq_labels, std::vector<int> *n_unique); template <> void launch_local_operation<int>( int *out, int64_t *knn_indices, std::vector<int *> y, size_t total_labels, size_t cur_batch_size, int k, const std::shared_ptr<deviceAllocator> alloc, cudaStream_t stream, cudaStream_t *int_streams, int n_int_streams, bool probas_only, std::vector<float *> *probas, std::vector<int *> *uniq_labels, std::vector<int> *n_unique) { if (probas_only) { MLCommon::Selection::class_probs<32, true>( *probas, nullptr, y, total_labels, cur_batch_size, k, *uniq_labels, *n_unique, alloc, stream, &int_streams[0], n_int_streams); } else { MLCommon::Selection::knn_classify<32, true>( out, nullptr, y, total_labels, cur_batch_size, k, *uniq_labels, *n_unique, alloc, stream, &int_streams[0], n_int_streams); } } template <> void launch_local_operation<float>( float *out, int64_t *knn_indices, std::vector<float *> y, size_t total_labels, size_t cur_batch_size, int k, const std::shared_ptr<deviceAllocator> alloc, cudaStream_t stream, cudaStream_t *int_streams, int n_int_streams, bool probas_only, std::vector<float *> *probas, std::vector<int *> *uniq_labels, std::vector<int> *n_unique) { MLCommon::Selection::knn_regress<float, 32, 
true>( out, nullptr, y, total_labels, cur_batch_size, k, stream, &int_streams[0], n_int_streams); } template <typename T> void perform_local_operation(T *out, int64_t *knn_indices, T *labels, size_t cur_batch_size, int k, int n_outputs, ML::cumlHandle &h, bool probas_only = false, std::vector<float *> *probas = nullptr, std::vector<int *> *uniq_labels = nullptr, std::vector<int> *n_unique = nullptr) { size_t n_labels = cur_batch_size * k; size_t total_labels = n_outputs * n_labels; std::vector<T *> y(n_outputs); for (int o = 0; o < n_outputs; o++) { y[o] = labels + (o * n_labels); } cudaStream_t stream = h.getStream(); const std::shared_ptr<deviceAllocator> alloc = h.getDeviceAllocator(); int n_int_streams = h.getImpl().getNumInternalStreams(); cudaStream_t int_streams[n_int_streams]; for (int i = 0; i < n_int_streams; i++) { int_streams[i] = h.getImpl().getInternalStream(i); } launch_local_operation<T>(out, knn_indices, y, total_labels, cur_batch_size, k, alloc, stream, int_streams, n_int_streams, probas_only, probas, uniq_labels, n_unique); } template <typename T> void reduce(ML::cumlHandle &handle, std::vector<Matrix::Data<T> *> *out, std::vector<Matrix::Data<int64_t> *> *out_I, std::vector<Matrix::floatData_t *> *out_D, device_buffer<T> &res, device_buffer<int64_t> &res_I, device_buffer<float> &res_D, Matrix::PartDescriptor &index_desc, size_t cur_batch_size, int k, int n_outputs, int local_parts_completed, int cur_batch, size_t total_n_processed, std::set<int> idxRanks, int my_rank, bool probas_only = false, std::vector<std::vector<float *>> *probas = nullptr, std::vector<int *> *uniq_labels = nullptr, std::vector<int> *n_unique = nullptr) { const ML::cumlHandle_impl &h = handle.getImpl(); cudaStream_t stream = h.getStream(); const std::shared_ptr<deviceAllocator> alloc = h.getDeviceAllocator(); device_buffer<int64_t> trans(alloc, stream, idxRanks.size()); CUDA_CHECK(cudaMemsetAsync(trans.data(), 0, idxRanks.size() * sizeof(int64_t), stream)); size_t batch_offset = total_n_processed * k; T *output = nullptr; int64_t *indices = nullptr; float *distances = nullptr; device_buffer<int64_t> *indices_b; device_buffer<float> *distances_b; std::vector<float *> probas_with_offsets; if (probas_only) { indices_b = new device_buffer<int64_t>(alloc, stream, cur_batch_size * k); distances_b = new device_buffer<float>(alloc, stream, cur_batch_size * k); indices = indices_b->data(); distances = distances_b->data(); auto &probas_part = probas->at(local_parts_completed); for (float *ptr : probas_part) { probas_with_offsets.push_back(ptr + batch_offset); } } else { output = out->at(local_parts_completed)->ptr + batch_offset; indices = out_I->at(local_parts_completed)->ptr + batch_offset; distances = out_D->at(local_parts_completed)->ptr + batch_offset; } MLCommon::Selection::knn_merge_parts(res_D.data(), res_I.data(), distances, indices, cur_batch_size, idxRanks.size(), k, stream, trans.data()); perform_local_operation(output, indices, res.data(), cur_batch_size, k, n_outputs, handle, probas_only, &probas_with_offsets, uniq_labels, n_unique); if (probas_only) { delete indices_b; delete distances_b; } } void perform_local_knn(int64_t *res_I, float *res_D, std::vector<Matrix::floatData_t *> &idx_data, Matrix::PartDescriptor &idx_desc, std::vector<Matrix::RankSizePair *> &local_idx_parts, std::vector<size_t> &start_indices, cudaStream_t stream, cudaStream_t *internal_streams, int n_internal_streams, std::shared_ptr<deviceAllocator> allocator, size_t cur_batch_size, int k, float *cur_query_ptr, bool 
rowMajorIndex, bool rowMajorQuery) { std::vector<float *> ptrs(idx_data.size()); std::vector<int> sizes(idx_data.size()); for (int cur_idx = 0; cur_idx < idx_data.size(); cur_idx++) { ptrs[cur_idx] = idx_data[cur_idx]->ptr; sizes[cur_idx] = local_idx_parts[cur_idx]->size; } // PartDescriptor uses size_t while FAISS uses int64_t // so we need to do a quick conversion. std::vector<int64_t> start_indices_long; for (size_t start_index : start_indices) start_indices_long.push_back((int64_t)start_index); // ID ranges need to be offset by each local partition's // starting indices. MLCommon::Selection::brute_force_knn( ptrs, sizes, (int)idx_desc.N, cur_query_ptr, (int)cur_batch_size, res_I, res_D, k, allocator, stream, internal_streams, n_internal_streams, rowMajorIndex, rowMajorQuery, &start_indices_long); } void broadcast_query(float *query, size_t batch_input_elms, int part_rank, std::set<int> idxRanks, const cumlCommunicator &comm, cudaStream_t stream) { int my_rank = comm.getRank(); int request_idx = 0; std::vector<MLCommon::cumlCommunicator::request_t> requests; if (part_rank == my_rank) { int idx_rank_size = idxRanks.size(); if (idxRanks.find(my_rank) != idxRanks.end()) { --idx_rank_size; } requests.resize(idx_rank_size); for (int rank : idxRanks) { if (rank != my_rank) { comm.isend(query, batch_input_elms, rank, 0, requests.data() + request_idx); ++request_idx; } } } else { requests.resize(1); comm.irecv(query, batch_input_elms, part_rank, 0, requests.data() + request_idx); ++request_idx; } try { comm.waitall(requests.size(), requests.data()); } catch (Exception &e) { std::cout << "FAILURE!" << std::endl; } } /** * All non-root index ranks send the results for the current * query batch to the root rank for the batch. */ template <typename T> void exchange_results(device_buffer<T> &res, device_buffer<int64_t> &res_I, device_buffer<float> &res_D, const cumlCommunicator &comm, int part_rank, std::set<int> idxRanks, cudaStream_t stream, size_t cur_batch_size, int k, int n_outputs, int local_parts_completed) { int my_rank = comm.getRank(); size_t batch_elms = cur_batch_size * k; int request_idx = 0; std::vector<MLCommon::cumlCommunicator::request_t> requests; if (part_rank != my_rank) { requests.resize(2 + n_outputs); comm.isend(res_I.data(), batch_elms, part_rank, 0, requests.data() + request_idx); ++request_idx; comm.isend(res_D.data(), batch_elms, part_rank, 0, requests.data() + request_idx); ++request_idx; for (size_t o = 0; o < n_outputs; o++) { comm.isend(res.data() + (o * batch_elms), batch_elms, part_rank, 0, requests.data() + request_idx); request_idx++; } } else { bool part_rank_is_idx = idxRanks.find(part_rank) != idxRanks.end(); int idx_rank_size = idxRanks.size(); int num_received = 0; // if root rank is an index, it will already have // query data, so no need to receive from it. 
res.resize(batch_elms * n_outputs * idx_rank_size, stream); res_I.resize(batch_elms * idx_rank_size, stream); res_D.resize(batch_elms * idx_rank_size, stream); if (part_rank_is_idx) { num_received = 1; // root rank will take the zeroth slot --idx_rank_size; } requests.resize((2 + n_outputs) * idx_rank_size); for (int rank : idxRanks) { if (rank != my_rank) { size_t batch_offset = batch_elms * num_received; comm.irecv(res_I.data() + batch_offset, batch_elms, rank, 0, requests.data() + request_idx); ++request_idx; comm.irecv(res_D.data() + batch_offset, batch_elms, rank, 0, requests.data() + request_idx); ++request_idx; for (size_t o = 0; o < n_outputs; o++) { T *r = res.data() + (o * idxRanks.size() * batch_elms) + batch_offset; comm.irecv(r, batch_elms, rank, 0, requests.data() + request_idx); ++request_idx; } ++num_received; } } } try { comm.waitall(requests.size(), requests.data()); } catch (Exception &e) { std::cout << "FAILURE!" << std::endl; } } template <typename T> void opg_knn(ML::cumlHandle &handle, std::vector<Matrix::Data<T> *> *out, std::vector<Matrix::Data<int64_t> *> *out_I, std::vector<Matrix::floatData_t *> *out_D, std::vector<Matrix::floatData_t *> &idx_data, Matrix::PartDescriptor &idx_desc, std::vector<Matrix::floatData_t *> &query_data, Matrix::PartDescriptor &query_desc, std::vector<std::vector<T *>> &y, bool rowMajorIndex, bool rowMajorQuery, int k, int n_outputs, size_t batch_size, bool verbose, std::vector<std::vector<float *>> *probas = nullptr, std::vector<int *> *uniq_labels = nullptr, std::vector<int> *n_unique = nullptr, bool probas_only = false) { ASSERT(k <= 1024, "k must be <= 1024"); ASSERT(batch_size > 0, "max_batch_size must be > 0"); ASSERT(k < idx_desc.M, "k must be less than the total number of query rows"); for (Matrix::RankSizePair *rsp : idx_desc.partsToRanks) { ASSERT(rsp->size >= k, "k must be <= the number of rows in the smallest index partition."); } const ML::cumlHandle_impl &h = handle.getImpl(); const cumlCommunicator &comm = h.getCommunicator(); cudaStream_t stream = h.getStream(); const std::shared_ptr<deviceAllocator> allocator = h.getDeviceAllocator(); int my_rank = comm.getRank(); std::set<int> idxRanks = idx_desc.uniqueRanks(); std::vector<Matrix::RankSizePair *> local_idx_parts = idx_desc.blocksOwnedBy(comm.getRank()); int local_parts_completed = 0; // Loop through query parts for all ranks for (int i = 0; i < query_desc.totalBlocks(); i++) { Matrix::RankSizePair *partition = query_desc.partsToRanks[i]; int part_rank = partition->rank; size_t part_n_rows = partition->size; size_t total_batches = ceildiv(part_n_rows, batch_size); size_t total_n_processed = 0; // Loop through batches for each query part for (int cur_batch = 0; cur_batch < total_batches; cur_batch++) { size_t cur_batch_size = batch_size; if (cur_batch == total_batches - 1) cur_batch_size = part_n_rows - (cur_batch * batch_size); if (my_rank == part_rank && verbose) { std::cout << "Root Rank is " << my_rank << std::endl; } /** * Root broadcasts batch to all other ranks */ if (verbose) { std::cout << "Rank " << my_rank << ": Performing Broadcast" << std::endl; } int my_rank = comm.getRank(); device_buffer<float> part_data(allocator, stream, 0); size_t batch_input_elms = cur_batch_size * query_desc.N; size_t batch_input_offset = batch_input_elms * cur_batch; float *cur_query_ptr; device_buffer<float> tmp_batch_buf(allocator, stream, 0); // current partition's owner rank broadcasts if (part_rank == my_rank) { Matrix::Data<float> *data = query_data[local_parts_completed]; // 
If query is column major and total_batches > 0, create a // temporary buffer for the batch so that we can stack rows. if (!rowMajorQuery && total_batches > 1) { tmp_batch_buf.resize(batch_input_elms, stream); for (int col_data = 0; col_data < query_desc.N; col_data++) { copy(tmp_batch_buf.data() + (col_data * cur_batch_size), data->ptr + ((col_data * part_n_rows) + total_n_processed), cur_batch_size, stream); } cur_query_ptr = tmp_batch_buf.data(); } else { cur_query_ptr = data->ptr + batch_input_offset; } // all other (index) ranks receive } else if (idxRanks.find(my_rank) != idxRanks.end()) { part_data.resize(batch_input_elms, stream); cur_query_ptr = part_data.data(); } bool my_rank_is_idx = idxRanks.find(my_rank) != idxRanks.end(); /** * Send query to index partitions */ if (my_rank == part_rank || my_rank_is_idx) broadcast_query(cur_query_ptr, batch_input_elms, part_rank, idxRanks, comm, stream); device_buffer<T> res(allocator, stream); device_buffer<int64_t> res_I(allocator, stream); device_buffer<float> res_D(allocator, stream); if (my_rank_is_idx) { /** * All index ranks perform local KNN */ if (verbose) std::cout << "Rank " << my_rank << ": Performing Local KNN" << std::endl; size_t batch_knn_elms = k * cur_batch_size; res.resize(batch_knn_elms * n_outputs, stream); res_I.resize(batch_knn_elms, stream); res_D.resize(batch_knn_elms, stream); // Offset nearest neighbor index matrix by partition indices std::vector<size_t> start_indices = idx_desc.startIndices(my_rank); cudaStream_t int_streams[handle.getImpl().getNumInternalStreams()]; for (int i = 0; i < handle.getImpl().getNumInternalStreams(); i++) { int_streams[i] = handle.getImpl().getInternalStream(i); } perform_local_knn(res_I.data(), res_D.data(), idx_data, idx_desc, local_idx_parts, start_indices, stream, int_streams, handle.getNumInternalStreams(), handle.getDeviceAllocator(), cur_batch_size, k, cur_query_ptr, rowMajorIndex, rowMajorQuery); // Synchronize before running labels copy CUDA_CHECK(cudaStreamSynchronize(stream)); copy_outputs(res.data(), res_I.data(), y, (size_t)cur_batch_size, (int)k, (int)n_outputs, (int)idx_desc.N, my_rank, idx_desc.partsToRanks, handle.getDeviceAllocator(), stream); // Synchronize before sending CUDA_CHECK(cudaStreamSynchronize(stream)); } /** * Ranks exchange results. * Partition owner receives. All other ranks send. 
*/ if (verbose) std::cout << "Rank " << my_rank << ": Exchanging results" << std::endl; exchange_results(res, res_I, res_D, comm, part_rank, idxRanks, stream, cur_batch_size, k, n_outputs, local_parts_completed); /** * Root rank performs local reduce */ if (part_rank == my_rank) { if (verbose) std::cout << "Rank " << my_rank << ": Performing Reduce" << std::endl; reduce(handle, out, out_I, out_D, res, res_I, res_D, idx_desc, cur_batch_size, k, n_outputs, local_parts_completed, cur_batch, total_n_processed, idxRanks, my_rank, probas_only, probas, uniq_labels, n_unique); CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaPeekAtLastError()); if (verbose) std::cout << "Rank " << my_rank << ": Finished Reduce" << std::endl; } total_n_processed += cur_batch_size; } if (my_rank == part_rank) local_parts_completed++; } }; template void opg_knn<int>(ML::cumlHandle &handle, std::vector<Matrix::Data<int> *> *out, std::vector<Matrix::Data<int64_t> *> *out_I, std::vector<Matrix::floatData_t *> *out_D, std::vector<Matrix::floatData_t *> &idx_data, Matrix::PartDescriptor &idx_desc, std::vector<Matrix::floatData_t *> &query_data, Matrix::PartDescriptor &query_desc, std::vector<std::vector<int *>> &y, bool rowMajorIndex, bool rowMajorQuery, int k, int n_outputs, size_t batch_size, bool verbose, std::vector<std::vector<float *>> *probas, std::vector<int *> *uniq_labels, std::vector<int> *n_unique, bool probas_only); template void opg_knn<float>(ML::cumlHandle &handle, std::vector<Matrix::Data<float> *> *out, std::vector<Matrix::Data<int64_t> *> *out_I, std::vector<Matrix::floatData_t *> *out_D, std::vector<Matrix::floatData_t *> &idx_data, Matrix::PartDescriptor &idx_desc, std::vector<Matrix::floatData_t *> &query_data, Matrix::PartDescriptor &query_desc, std::vector<std::vector<float *>> &y, bool rowMajorIndex, bool rowMajorQuery, int k, int n_outputs, size_t batch_size, bool verbose, std::vector<std::vector<float *>> *probas, std::vector<int *> *uniq_labels, std::vector<int> *n_unique, bool probas_only); template void reduce<int>( ML::cumlHandle &handle, std::vector<Matrix::Data<int> *> *out, std::vector<Matrix::Data<int64_t> *> *out_I, std::vector<Matrix::floatData_t *> *out_D, device_buffer<int> &res, device_buffer<int64_t> &res_I, device_buffer<float> &res_D, Matrix::PartDescriptor &index_desc, size_t cur_batch_size, int k, int n_outputs, int local_parts_completed, int cur_batch, size_t total_n_processed, std::set<int> idxRanks, int my_rank, bool probas_only, std::vector<std::vector<float *>> *probas, std::vector<int *> *uniq_labels, std::vector<int> *n_unique); template void reduce<float>( ML::cumlHandle &handle, std::vector<Matrix::Data<float> *> *out, std::vector<Matrix::Data<int64_t> *> *out_I, std::vector<Matrix::floatData_t *> *out_D, device_buffer<float> &res, device_buffer<int64_t> &res_I, device_buffer<float> &res_D, Matrix::PartDescriptor &index_desc, size_t cur_batch_size, int k, int n_outputs, int local_parts_completed, int cur_batch, size_t total_n_processed, std::set<int> idxRanks, int my_rank, bool probas_only, std::vector<std::vector<float *>> *probas, std::vector<int *> *uniq_labels, std::vector<int> *n_unique); template void exchange_results<int>(device_buffer<int> &res, device_buffer<int64_t> &res_I, device_buffer<float> &res_D, const cumlCommunicator &comm, int part_rank, std::set<int> idxRanks, cudaStream_t stream, size_t cur_batch_size, int k, int n_outputs, int local_parts_completed); template void exchange_results<float>( device_buffer<float> &res, device_buffer<int64_t> 
&res_I, device_buffer<float> &res_D, const cumlCommunicator &comm, int part_rank, std::set<int> idxRanks, cudaStream_t stream, size_t cur_batch_size, int k, int n_outputs, int local_parts_completed); template void copy_outputs<int>( int *out, int64_t *knn_indices, std::vector<std::vector<int *>> &y, size_t cur_batch_size, int k, int n_outputs, int n_features, int my_rank, std::vector<Matrix::RankSizePair *> &idxPartsToRanks, std::shared_ptr<deviceAllocator> alloc, cudaStream_t stream); template void copy_outputs<float>( float *out, int64_t *knn_indices, std::vector<std::vector<float *>> &y, size_t cur_batch_size, int k, int n_outputs, int n_features, int my_rank, std::vector<Matrix::RankSizePair *> &idxPartsToRanks, std::shared_ptr<deviceAllocator> alloc, cudaStream_t stream); }; // namespace knn_common }; // namespace opg }; // namespace KNN }; // namespace ML
3ccd747185725a453f0735aff7288f0696819cd2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" %%cu # include <stdio.h> # include <stdlib.h> # include <hip/hip_runtime.h> long n = 33000; __global__ void testKernel(long *in, long *out, long size, long n) { int index = blockIdx.x * blockDim.x + threadIdx.x; long temp; __shared__ bool swappedodd; __shared__ bool swappedeven; long i; // printf("Thread number: %d ;", index); for(i=0; i < n; i++) { if(n % 2 == 0) { if (i % 2 == 0) { __syncthreads(); swappedeven = false; __syncthreads(); if (index < (size / 2)) { //printf("Odd: Thread number: %d ;", index); if (in[index * 2] > in[index * 2 + 1]) { //printf("Odd: Thread %d change: %ld <-> %ld ;", index, in[index*2], in[index*2 +1]); temp = in[index * 2]; in[index * 2] = in[index * 2 + 1]; in[index * 2 + 1] = temp; swappedeven=true; } } __syncthreads(); } else { __syncthreads(); swappedodd = false; __syncthreads(); if (index < (size / 2) - 1) { //printf("Even: Thread number: %d ;", index); if (in[index * 2 + 1] > in[index * 2 + 2]) { //printf("Even: Thread %d change: %ld <-> %ld ;", index, in[index*2+1], in[index*2 +2]); temp = in[index * 2 + 1]; in[index * 2 + 1] = in[index * 2 + 2]; in[index * 2 + 2] = temp; swappedodd=true; } } __syncthreads(); } } else { if (i % 2 == 0) { __syncthreads(); swappedeven = false; __syncthreads(); if (index < (size / 2)) { //printf("Odd: Thread number: %d ;", index); if (in[index * 2] > in[index * 2 + 1]) { //printf("Odd: Thread %d change: %ld <-> %ld ;", index, in[index*2], in[index*2 +1]); temp = in[index * 2]; in[index * 2] = in[index * 2 + 1]; in[index * 2 + 1] = temp; swappedeven=true; } } __syncthreads(); } else { __syncthreads(); swappedodd = false; __syncthreads(); if (index < (size / 2)) { //printf("Even: Thread number: %d ;", index); if (in[index * 2 + 1] > in[index * 2 + 2]) { //printf("Even: Thread %d change: %ld <-> %ld ;", index, in[index*2+1], in[index*2 +2]); temp = in[index * 2 + 1]; in[index * 2 + 1] = in[index * 2 + 2]; in[index * 2 + 2] = temp; swappedodd=true; } } __syncthreads(); } } if (!(swappedodd || swappedeven)) break; } __syncthreads(); //printf("Final: Thread %d val = %ld ;", index, in[index]); // printf("Final: Thread %d val = %ld ;", size / 2 + index, in[size / 2 + index]); out[index] = in[index]; out[size / 2 + index] = in[size / 2 + index]; if(n % 2 != 0) { if(index == size / 2 - 1) { out[size - 1] = in[size - 1]; } } } int main(void) { long i; long * a, * a_sorted; long * d_a, * d_sorted; //int n = 1* 1000 * 10; //make sure to keep this even long size = sizeof(long) * n; hipEvent_t start, stop; hipEventCreate( & start); hipEventCreate( & stop); hipEventRecord(start, 0); hipMalloc((void ** ) & d_a, size); hipMalloc((void ** ) & d_sorted, size); a = (long * )malloc(size); a_sorted = (long * )malloc(size); time_t t; /* Intializes random number generator */ srand((unsigned)time( & t)); int random_nr; for (i = 0; i < n; i++) { random_nr = rand() % 100; a[i] = random_nr; //printf(" a[%d] = %ld ", i, a[i]); printf("%ld ;", a[i]); } //d_a -> destination. a -> source. 
//Host to device array copy hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); //<<< >>> CUDA semantic int nr_blocks; int nr_threads; if(n > 512) { nr_blocks = n / 512 + 1; nr_threads = 512; } else { nr_blocks = 1; nr_threads = n; } hipLaunchKernelGGL(( testKernel) , dim3(nr_blocks),dim3( nr_threads), 0, 0, d_a, d_sorted, n, n); //Device to Host array for final display (I/O) hipMemcpy(a_sorted, d_sorted, size, hipMemcpyDeviceToHost); printf("Sorted: "); for (i = 0; i < n; i++) { printf("%ld, ", a_sorted[i]); } printf("\n"); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime( & milliseconds, start, stop); printf("Time spent: %.5f\n", milliseconds); size_t free_byte; size_t total_byte; hipMemGetInfo( & free_byte, & total_byte); double free_db = (double)free_byte; double total_db = (double)total_byte; double used_db = total_db - free_db; printf("GPU memory usage: used = %f, free = %f MB, total = %f MB\n", used_db / 1024.0 / 1024.0, free_db / 1024.0 / 1024.0, total_db / 1024.0 / 1024.0); hipEventDestroy(start); hipEventDestroy(stop); //free memory allocated by malloc and cudamalloc free(a); free(a_sorted); hipFree(d_sorted); hipFree(d_a); }
3ccd747185725a453f0735aff7288f0696819cd2.cu
%%cu # include <stdio.h> # include <stdlib.h> # include <cuda.h> long n = 33000; __global__ void testKernel(long *in, long *out, long size, long n) { int index = blockIdx.x * blockDim.x + threadIdx.x; long temp; __shared__ bool swappedodd; __shared__ bool swappedeven; long i; // printf("Thread number: %d ;", index); for(i=0; i < n; i++) { if(n % 2 == 0) { if (i % 2 == 0) { __syncthreads(); swappedeven = false; __syncthreads(); if (index < (size / 2)) { //printf("Odd: Thread number: %d ;", index); if (in[index * 2] > in[index * 2 + 1]) { //printf("Odd: Thread %d change: %ld <-> %ld ;", index, in[index*2], in[index*2 +1]); temp = in[index * 2]; in[index * 2] = in[index * 2 + 1]; in[index * 2 + 1] = temp; swappedeven=true; } } __syncthreads(); } else { __syncthreads(); swappedodd = false; __syncthreads(); if (index < (size / 2) - 1) { //printf("Even: Thread number: %d ;", index); if (in[index * 2 + 1] > in[index * 2 + 2]) { //printf("Even: Thread %d change: %ld <-> %ld ;", index, in[index*2+1], in[index*2 +2]); temp = in[index * 2 + 1]; in[index * 2 + 1] = in[index * 2 + 2]; in[index * 2 + 2] = temp; swappedodd=true; } } __syncthreads(); } } else { if (i % 2 == 0) { __syncthreads(); swappedeven = false; __syncthreads(); if (index < (size / 2)) { //printf("Odd: Thread number: %d ;", index); if (in[index * 2] > in[index * 2 + 1]) { //printf("Odd: Thread %d change: %ld <-> %ld ;", index, in[index*2], in[index*2 +1]); temp = in[index * 2]; in[index * 2] = in[index * 2 + 1]; in[index * 2 + 1] = temp; swappedeven=true; } } __syncthreads(); } else { __syncthreads(); swappedodd = false; __syncthreads(); if (index < (size / 2)) { //printf("Even: Thread number: %d ;", index); if (in[index * 2 + 1] > in[index * 2 + 2]) { //printf("Even: Thread %d change: %ld <-> %ld ;", index, in[index*2+1], in[index*2 +2]); temp = in[index * 2 + 1]; in[index * 2 + 1] = in[index * 2 + 2]; in[index * 2 + 2] = temp; swappedodd=true; } } __syncthreads(); } } if (!(swappedodd || swappedeven)) break; } __syncthreads(); //printf("Final: Thread %d val = %ld ;", index, in[index]); // printf("Final: Thread %d val = %ld ;", size / 2 + index, in[size / 2 + index]); out[index] = in[index]; out[size / 2 + index] = in[size / 2 + index]; if(n % 2 != 0) { if(index == size / 2 - 1) { out[size - 1] = in[size - 1]; } } } int main(void) { long i; long * a, * a_sorted; long * d_a, * d_sorted; //int n = 1* 1000 * 10; //make sure to keep this even long size = sizeof(long) * n; cudaEvent_t start, stop; cudaEventCreate( & start); cudaEventCreate( & stop); cudaEventRecord(start, 0); cudaMalloc((void ** ) & d_a, size); cudaMalloc((void ** ) & d_sorted, size); a = (long * )malloc(size); a_sorted = (long * )malloc(size); time_t t; /* Intializes random number generator */ srand((unsigned)time( & t)); int random_nr; for (i = 0; i < n; i++) { random_nr = rand() % 100; a[i] = random_nr; //printf(" a[%d] = %ld ", i, a[i]); printf("%ld ;", a[i]); } //d_a -> destination. a -> source. 
//Host to device array copy cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); //<<< >>> CUDA semantic int nr_blocks; int nr_threads; if(n > 512) { nr_blocks = n / 512 + 1; nr_threads = 512; } else { nr_blocks = 1; nr_threads = n; } testKernel <<< nr_blocks, nr_threads>>> (d_a, d_sorted, n, n); //Device to Host array for final display (I/O) cudaMemcpy(a_sorted, d_sorted, size, cudaMemcpyDeviceToHost); printf("Sorted: "); for (i = 0; i < n; i++) { printf("%ld, ", a_sorted[i]); } printf("\n"); cudaThreadSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime( & milliseconds, start, stop); printf("Time spent: %.5f\n", milliseconds); size_t free_byte; size_t total_byte; cudaMemGetInfo( & free_byte, & total_byte); double free_db = (double)free_byte; double total_db = (double)total_byte; double used_db = total_db - free_db; printf("GPU memory usage: used = %f, free = %f MB, total = %f MB\n", used_db / 1024.0 / 1024.0, free_db / 1024.0 / 1024.0, total_db / 1024.0 / 1024.0); cudaEventDestroy(start); cudaEventDestroy(stop); //free memory allocated by malloc and cudamalloc free(a); free(a_sorted); cudaFree(d_sorted); cudaFree(d_a); }
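The two listings above implement an odd-even (brick) transposition sort on the GPU; note that the early-exit flags are __shared__ and therefore per-block, so with n = 33000 (multiple blocks) they do not form a global convergence test. A minimal host-side reference sort, useful for cross-checking the device output, might look like the sketch below; it is an illustrative addition, not part of either file.

/* Hypothetical host-side reference (not part of the original file): a plain
   odd-even transposition sort for cross-checking the GPU output. */
static void odd_even_sort_host(long *v, long n)
{
    int sorted = 0;
    while (!sorted) {
        sorted = 1;
        for (long i = 0; i + 1 < n; i += 2) {   /* even phase: pairs (0,1), (2,3), ... */
            if (v[i] > v[i + 1]) {
                long tmp = v[i]; v[i] = v[i + 1]; v[i + 1] = tmp;
                sorted = 0;
            }
        }
        for (long i = 1; i + 1 < n; i += 2) {   /* odd phase: pairs (1,2), (3,4), ... */
            if (v[i] > v[i + 1]) {
                long tmp = v[i]; v[i] = v[i + 1]; v[i + 1] = tmp;
                sorted = 0;
            }
        }
    }
}
/* Usage: copy the unsorted input into a spare buffer, run odd_even_sort_host on it,
   and compare element by element against a_sorted after the kernel finishes. */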
29f97827fa2b5b1ea55194eb928370fb0b73e45f.hip
// !!! This is a file automatically generated by hipify!!!
//xfail:BOOGIE_ERROR
//--blockDim=2 --gridDim=1 --no-inline
#include <hip/hip_runtime.h>

__global__ void race_test (unsigned int* i, int* A)
{
    int tid = threadIdx.x;
    int j = atomicAdd(i, tid);
    A[j] = tid;
}
29f97827fa2b5b1ea55194eb928370fb0b73e45f.cu
//xfail:BOOGIE_ERROR
//--blockDim=2 --gridDim=1 --no-inline
#include <cuda.h>

__global__ void race_test (unsigned int* i, int* A)
{
    int tid = threadIdx.x;
    int j = atomicAdd(i, tid);
    A[j] = tid;
}
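This kernel is a verification benchmark (the xfail:BOOGIE_ERROR header marks an expected data race: two threads can receive the same j from atomicAdd and then write the same A[j]). If one wanted to run it directly, a hypothetical host driver matching the --blockDim=2 --gridDim=1 configuration could look like the sketch below, assuming it is appended to the .cu file above and built with nvcc; buffer sizes and the printout are illustrative assumptions.

/* Hypothetical driver (not part of the verification suite): launches the kernel
   with the --blockDim=2 --gridDim=1 configuration from the header comment. */
#include <stdio.h>

int main(void)
{
    unsigned int *d_i;
    int *d_A;
    unsigned int zero = 0;
    cudaMalloc((void **)&d_i, sizeof(unsigned int));
    cudaMalloc((void **)&d_A, 2 * sizeof(int));
    cudaMemcpy(d_i, &zero, sizeof(unsigned int), cudaMemcpyHostToDevice);

    race_test<<<1, 2>>>(d_i, d_A);   /* two threads, one block */
    cudaDeviceSynchronize();

    int A[2];
    cudaMemcpy(A, d_A, 2 * sizeof(int), cudaMemcpyDeviceToHost);
    printf("A = {%d, %d}\n", A[0], A[1]);   /* contents depend on the race outcome */

    cudaFree(d_i);
    cudaFree(d_A);
    return 0;
}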
6144f494c6db3a5cd5dc8f3e14e06a7d139148e8.hip
// !!! This is a file automatically generated by hipify!!!
#include "board.hpp"

Board::Board(int height, int width)
    : shape(height, width)
    , d_board(nullptr), h_board(nullptr)
    , d_allocated(false), h_allocated(false)
{}

auto Board::allocate_memory() -> void {
    device_allocate_memory();
    host_allocate_memory();
}

auto Board::allocate_memory_if_not_allocated(Shape shape) -> void {
    if(!d_allocated && !h_allocated){
        this->shape = shape;
        allocate_memory();
    }
}

auto Board::device_allocate_memory() -> void {
    if(!d_allocated){
        char* d_memory = nullptr;
        hipMalloc(&d_memory, shape.width * shape.height * sizeof(char));
        d_board = std::shared_ptr<char>(d_memory, [&](char* ptr){ hipFree(ptr); });
        d_allocated = true;
    }
}

auto Board::host_allocate_memory() -> void {
    if(!h_allocated){
        h_board = std::shared_ptr<char>(new char[shape.width * shape.height],
                                        [&](char* ptr){ delete[] ptr; });
        h_allocated = true;
    }
}

auto Board::copy_device_to_host() -> void {
    if(d_allocated && h_allocated){
        hipMemcpy(h_board.get(), d_board.get(),
                  shape.width * shape.height * sizeof(char),
                  hipMemcpyDeviceToHost);
    }
}

auto Board::copy_host_to_device() -> void {
    if(d_allocated && h_allocated){
        hipMemcpy(d_board.get(), h_board.get(),
                  shape.width * shape.height * sizeof(char),
                  hipMemcpyHostToDevice);
    }
}

auto Board::operator[](const int index) -> char& {
    return h_board.get()[index];
}

auto Board::operator[](const int index) const -> const char& {
    return h_board.get()[index];
}
6144f494c6db3a5cd5dc8f3e14e06a7d139148e8.cu
#include "board.hpp" Board::Board(int height, int width) : shape(height, width) , d_board(nullptr), h_board(nullptr) , d_allocated(false), h_allocated(false) {} auto Board::allocate_memory() -> void { device_allocate_memory(); host_allocate_memory(); } auto Board::allocate_memory_if_not_allocated(Shape shape) -> void { if(!d_allocated && !h_allocated){ this->shape = shape; allocate_memory(); } } auto Board::device_allocate_memory() -> void { if(!d_allocated){ char* d_memory = nullptr; cudaMalloc(&d_memory,shape.width * shape.height * sizeof(char)); d_board = std::shared_ptr<char> (d_memory,[&](char* ptr){ cudaFree(ptr); }); d_allocated = true; } } auto Board::host_allocate_memory() -> void { if(!h_allocated){ h_board = std::shared_ptr<char>(new char[shape.width * shape.height],[&](char* ptr){ delete[] ptr; }); h_allocated = true; } } auto Board::copy_device_to_host() -> void { if(d_allocated && h_allocated){ cudaMemcpy(h_board.get(), d_board.get() ,shape.width * shape.height * sizeof(char) ,cudaMemcpyDeviceToHost); } } auto Board::copy_host_to_device() -> void { if(d_allocated && h_allocated){ cudaMemcpy(d_board.get(), h_board.get() ,shape.width * shape.height * sizeof(char) ,cudaMemcpyHostToDevice); } } auto Board::operator[](const int index) -> char& { return h_board.get()[index]; } auto Board::operator[](const int index) const -> const char& { return h_board.get()[index]; }
3e80c91777168698954e62572f802ddd888fa6d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @precisions normal z -> c d s */ #include "common_magma.h" #include "common_magmasparse.h" #define BLOCK_SIZE 256 // kernel __global__ void zdiagcheck_kernel( int num_rows, int num_cols, magmaDoubleComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magma_int_t * dinfo ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ int localinfo = 1; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; // check whether there exists a nonzero diagonal entry for( j=start; j<end; j++){ if( (dcolind[j] == row) && (dval[j] != MAGMA_Z_ZERO) ){ localinfo = 0; } } // set flag to 1 if( localinfo == 1 ){ dinfo[0] = -3009; } } } /** Purpose ------- This routine checks for a CSR matrix whether there exists a zero on the diagonal. This can be the diagonal entry missing or an explicit zero. Arguments --------- @param[in] dA magma_z_matrix matrix in CSR format @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zaux ********************************************************************/ extern "C" magma_int_t magma_zdiagcheck( magma_z_matrix dA, magma_queue_t queue ) { magma_int_t info = 0; magma_int_t *hinfo = NULL; magma_int_t * dinfo = NULL; dim3 grid( magma_ceildiv( dA.num_rows, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; CHECK( magma_imalloc( &dinfo, 1 ) ); CHECK( magma_imalloc_cpu( &hinfo, 1 ) ); hinfo[0] = 0; magma_isetvector( 1, hinfo, 1, dinfo, 1 ); hipLaunchKernelGGL(( zdiagcheck_kernel), dim3(grid), dim3(threads), 0, queue , dA.num_rows, dA.num_cols, dA.dval, dA.drow, dA.dcol, dinfo ); info = hinfo[0]; magma_igetvector( 1, dinfo, 1, hinfo, 1 ); info = hinfo[0]; cleanup: magma_free( dinfo ); magma_free_cpu( hinfo ); return info; }
3e80c91777168698954e62572f802ddd888fa6d9.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @precisions normal z -> c d s */ #include "common_magma.h" #include "common_magmasparse.h" #define BLOCK_SIZE 256 // kernel __global__ void zdiagcheck_kernel( int num_rows, int num_cols, magmaDoubleComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magma_int_t * dinfo ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ int localinfo = 1; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; // check whether there exists a nonzero diagonal entry for( j=start; j<end; j++){ if( (dcolind[j] == row) && (dval[j] != MAGMA_Z_ZERO) ){ localinfo = 0; } } // set flag to 1 if( localinfo == 1 ){ dinfo[0] = -3009; } } } /** Purpose ------- This routine checks for a CSR matrix whether there exists a zero on the diagonal. This can be the diagonal entry missing or an explicit zero. Arguments --------- @param[in] dA magma_z_matrix matrix in CSR format @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zaux ********************************************************************/ extern "C" magma_int_t magma_zdiagcheck( magma_z_matrix dA, magma_queue_t queue ) { magma_int_t info = 0; magma_int_t *hinfo = NULL; magma_int_t * dinfo = NULL; dim3 grid( magma_ceildiv( dA.num_rows, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; CHECK( magma_imalloc( &dinfo, 1 ) ); CHECK( magma_imalloc_cpu( &hinfo, 1 ) ); hinfo[0] = 0; magma_isetvector( 1, hinfo, 1, dinfo, 1 ); zdiagcheck_kernel<<< grid, threads, 0, queue >>> ( dA.num_rows, dA.num_cols, dA.dval, dA.drow, dA.dcol, dinfo ); info = hinfo[0]; magma_igetvector( 1, dinfo, 1, hinfo, 1 ); info = hinfo[0]; cleanup: magma_free( dinfo ); magma_free_cpu( hinfo ); return info; }
9ca3e43c65bbc29c20d0498534710b54553ebc75.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma, created on 15.11.2018 // #include <loops/special_kernels.h> namespace nd4j { //////////////////////////////////////////////////////////////////////// template <typename T> __global__ void execFillIsMax(void *vdZ, Nd4jLong *xShapeInfo, Nd4jLong length, long idx) { auto dz = reinterpret_cast<T*>(vdZ); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < length; i += blockDim.x * gridDim.x) dz[shape::getIndexOffset(i, xShapeInfo, length)] = (i == idx ? (T) 1 : (T) 0); } //////////////////////////////////////////////////////////////////////// template <typename T> __host__ void fillIsMaxGeneric(dim3 &launchDims, hipStream_t *stream, void *dx, Nd4jLong *xShapeInfo, Nd4jLong length, long idx) { hipLaunchKernelGGL(( execFillIsMax<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, dx, xShapeInfo, length, idx); nd4j::DebugHelper::checkErrorCode(stream, "fillIsMax(...) failed"); } BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT fillIsMaxGeneric, (dim3& launchDims, hipStream_t *stream, void* dz, Nd4jLong *zShapeInfo, Nd4jLong length, long idx), LIBND4J_TYPES); }
9ca3e43c65bbc29c20d0498534710b54553ebc75.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma, created on 15.11.2018 // #include <loops/special_kernels.h> namespace nd4j { //////////////////////////////////////////////////////////////////////// template <typename T> __global__ void execFillIsMax(void *vdZ, Nd4jLong *xShapeInfo, Nd4jLong length, long idx) { auto dz = reinterpret_cast<T*>(vdZ); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < length; i += blockDim.x * gridDim.x) dz[shape::getIndexOffset(i, xShapeInfo, length)] = (i == idx ? (T) 1 : (T) 0); } //////////////////////////////////////////////////////////////////////// template <typename T> __host__ void fillIsMaxGeneric(dim3 &launchDims, cudaStream_t *stream, void *dx, Nd4jLong *xShapeInfo, Nd4jLong length, long idx) { execFillIsMax<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dx, xShapeInfo, length, idx); nd4j::DebugHelper::checkErrorCode(stream, "fillIsMax(...) failed"); } BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT fillIsMaxGeneric, (dim3& launchDims, cudaStream_t *stream, void* dz, Nd4jLong *zShapeInfo, Nd4jLong length, long idx), LIBND4J_TYPES); }
6fe40ddce4300de65ac3b9b51b6dc4cb49a8e6e7.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include "common.h" #include <stdio.h> #include <hiprand/hiprand_kernel.h> #define THREADS_PER_BLOCK 32 #define SEED 60 __global__ void mcOnGPU(float *f, const int N, const int nb, hiprandState_t *states) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; int n = N/nb; double cc = 0; //initialse hiprand hiprand_init((SEED << 20) + ix, 0, 0, &states[ix]); //hiprandState_t state = states[ix]; if (ix<n) { for(int i=0; i<nb; i++) { double xran = hiprand_uniform_double (&states[ix]); cc += cos(-log(xran)); } } atomicAdd(f,cc); __syncthreads(); } __global__ void integralOnGPU(float *f,double *Int ,const int N) { Int[0] = abs(f[0]/N); } int main(int argc, char **argv) { // problem size long long int N = atol(argv[1]); int T = THREADS_PER_BLOCK; //random number generator hiprandState_t *States; // malloc host memory double gpuRef; //start timing double iStart = seconds(); // malloc device global memory float *d_f; double *d_Int; CHECK(hipMalloc((void **)&d_f, sizeof(double))); CHECK(hipMalloc((void **)&d_Int, sizeof(double))); //invoke the kernel int B = ((N + T -1)/T); if(B > 65535) B = 65535; int nb = ceil((N*1.0)/(B*T)); //states allocate memory CHECK(hipMalloc( (void **)&States, (B*T)*sizeof(hiprandState_t))); hipLaunchKernelGGL(( mcOnGPU), dim3(B),dim3(T), 0, 0, d_f, N, nb, States); CHECK(hipDeviceSynchronize()); hipLaunchKernelGGL(( integralOnGPU), dim3(1),dim3(1), 0, 0, d_f,d_Int ,N); CHECK(hipDeviceSynchronize()); // check kernel error CHECK(hipGetLastError()); double iElaps_g = seconds() - iStart; // copy kernel result back to host side CHECK(hipMemcpy(&gpuRef, d_Int, sizeof(double), hipMemcpyDeviceToHost)); //error achived double error = abs(0.5 - gpuRef); //printf("%lld,%f,%e,%f\n",N,gpuRef,error,iElaps_g); printf("%lld,%f,%e,%f\n",N,gpuRef,error,iElaps_g); //free device memory CHECK(hipFree(States)); CHECK(hipFree(d_f)); CHECK(hipFree(d_Int)); // reset device CHECK(hipDeviceReset()); return (0); }
6fe40ddce4300de65ac3b9b51b6dc4cb49a8e6e7.cu
#include <cuda_runtime.h> #include <curand.h> #include "common.h" #include <stdio.h> #include <curand_kernel.h> #define THREADS_PER_BLOCK 32 #define SEED 60 __global__ void mcOnGPU(float *f, const int N, const int nb, curandState *states) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; int n = N/nb; double cc = 0; //initialse curand curand_init((SEED << 20) + ix, 0, 0, &states[ix]); //curandState state = states[ix]; if (ix<n) { for(int i=0; i<nb; i++) { double xran = curand_uniform_double (&states[ix]); cc += cos(-log(xran)); } } atomicAdd(f,cc); __syncthreads(); } __global__ void integralOnGPU(float *f,double *Int ,const int N) { Int[0] = abs(f[0]/N); } int main(int argc, char **argv) { // problem size long long int N = atol(argv[1]); int T = THREADS_PER_BLOCK; //random number generator curandState *States; // malloc host memory double gpuRef; //start timing double iStart = seconds(); // malloc device global memory float *d_f; double *d_Int; CHECK(cudaMalloc((void **)&d_f, sizeof(double))); CHECK(cudaMalloc((void **)&d_Int, sizeof(double))); //invoke the kernel int B = ((N + T -1)/T); if(B > 65535) B = 65535; int nb = ceil((N*1.0)/(B*T)); //states allocate memory CHECK(cudaMalloc( (void **)&States, (B*T)*sizeof(curandState))); mcOnGPU<<<B,T>>>(d_f, N, nb, States); CHECK(cudaDeviceSynchronize()); integralOnGPU<<<1,1>>>(d_f,d_Int ,N); CHECK(cudaDeviceSynchronize()); // check kernel error CHECK(cudaGetLastError()); double iElaps_g = seconds() - iStart; // copy kernel result back to host side CHECK(cudaMemcpy(&gpuRef, d_Int, sizeof(double), cudaMemcpyDeviceToHost)); //error achived double error = abs(0.5 - gpuRef); //printf("%lld,%f,%e,%f\n",N,gpuRef,error,iElaps_g); printf("%lld,%f,%e,%f\n",N,gpuRef,error,iElaps_g); //free device memory CHECK(cudaFree(States)); CHECK(cudaFree(d_f)); CHECK(cudaFree(d_Int)); // reset device CHECK(cudaDeviceReset()); return (0); }
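The kernels above estimate the integral of cos(x)*exp(-x) over [0, inf), which equals 0.5, by drawing x = -ln(u) with u uniform on (0,1); that is why the reported error is measured against 0.5. A single-threaded reference estimator is sketched below for comparison; the rand()-based sampler, seed, and sample count are illustrative assumptions, not taken from the file.

/* Hypothetical single-threaded reference for the estimator above. Since
   x = -ln(u) with u ~ Uniform(0,1) is exponentially distributed, the mean of
   cos(-log(u)) equals the integral of cos(x)*exp(-x) over [0, inf), i.e. 0.5. */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const long long N = 1000000;    /* sample count, illustrative only */
    double acc = 0.0;
    srand(12345);                   /* fixed seed, illustrative only */
    for (long long i = 0; i < N; i++) {
        double u = (rand() + 1.0) / ((double)RAND_MAX + 2.0);  /* u in (0,1) */
        acc += cos(-log(u));
    }
    printf("serial estimate = %f (expected ~0.5)\n", acc / N);
    return 0;
}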
b4ccd107b86f76b3dff196d3265ceb867529d84a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <demo_util.h> #define CLOCK_RATE 1124000 // in kHZ __device__ uint get_smid(void) { uint ret; asm("mov.u32 %0, %smid;" : "=r"(ret) ); return ret; } __device__ void sleep(float t) { clock_t t0 = clock64(); clock_t t1 = t0; while ((t1 - t0)/(CLOCK_RATE*1000.0f) < t) { t1 = clock64(); } } __global__ void worker(float *t,int *s) { int id = blockIdx.x; s[id] = get_smid(); sleep(t[id]); } #define N 1536 int main(void) { float *dev_t; int *dev_s; float t[N]; int s[N]; float SM[8] = {0,0,0,0,0,0,0,0}; int i; /* Allocate memory on the device */ hipMalloc( (void**)&dev_t, N*sizeof(float)); hipMalloc( (void**)&dev_s, N*sizeof(int)); random_seed(); float maxt = 0; for(i = 0; i < N; i++) { t[i] = 1; // 10*random_number(); maxt = t[i] > maxt ? t[i] : maxt; } hipMemcpy(dev_t, t, N*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( worker), dim3(48),dim3(32), 0, 0, dev_t, dev_s); /* Copy contents of dev_t back to t */ hipMemcpy(s, dev_s, N*sizeof(float), hipMemcpyDeviceToHost); for(i = 0; i < N; i++) { printf( "Block %2d worked for %8.4f seconds on SM %d\n",i,t[i],s[i]); SM[s[i]] += t[i]; } printf("Max t = %8.3f\n",maxt); printf("\n"); for(i = 7; i>= 0; i--) { printf("SM[%d] = %8.4f\n",i,SM[i]); } hipFree(dev_t); }
b4ccd107b86f76b3dff196d3265ceb867529d84a.cu
#include <stdio.h> #include <demo_util.h> #define CLOCK_RATE 1124000 // in kHZ __device__ uint get_smid(void) { uint ret; asm("mov.u32 %0, %smid;" : "=r"(ret) ); return ret; } __device__ void sleep(float t) { clock_t t0 = clock64(); clock_t t1 = t0; while ((t1 - t0)/(CLOCK_RATE*1000.0f) < t) { t1 = clock64(); } } __global__ void worker(float *t,int *s) { int id = blockIdx.x; s[id] = get_smid(); sleep(t[id]); } #define N 1536 int main(void) { float *dev_t; int *dev_s; float t[N]; int s[N]; float SM[8] = {0,0,0,0,0,0,0,0}; int i; /* Allocate memory on the device */ cudaMalloc( (void**)&dev_t, N*sizeof(float)); cudaMalloc( (void**)&dev_s, N*sizeof(int)); random_seed(); float maxt = 0; for(i = 0; i < N; i++) { t[i] = 1; // 10*random_number(); maxt = t[i] > maxt ? t[i] : maxt; } cudaMemcpy(dev_t, t, N*sizeof(float), cudaMemcpyHostToDevice); worker<<<48,32>>>(dev_t, dev_s); /* Copy contents of dev_t back to t */ cudaMemcpy(s, dev_s, N*sizeof(float), cudaMemcpyDeviceToHost); for(i = 0; i < N; i++) { printf( "Block %2d worked for %8.4f seconds on SM %d\n",i,t[i],s[i]); SM[s[i]] += t[i]; } printf("Max t = %8.3f\n",maxt); printf("\n"); for(i = 7; i>= 0; i--) { printf("SM[%d] = %8.4f\n",i,SM[i]); } cudaFree(dev_t); }
52549beb052ddd6e284d369b49061b33d2ea9722.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // FreckleFilter // #include "FreckleFilter.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; #include "ErrorCode.h" #include "Template.h" #include "TemplateFactory.h" // DEF_BLOCK_X DEF_BLOCK_Y // #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // Device _getMaxMatchValueDev length // // length static __device__ void // _getMaxMatchValueDev( unsigned int *histogram1, // unsigned int *histogram2, // float &maxmatchvalue, // int length, // int hisnum = 256 // 256 ); // Kernel _freckleFilterByVarSumCountKer // // static __global__ void // Kernel _freckleFilterByVarSumCountKer( ImageCuda inimg, // Template radtpl, // Template archtpl, // float varTh, // float *sum, // int *count // ); // Kernel _freckleFilterPixelKer // static __global__ void // Kernel _freckleFilterSetPixelKer( ImageCuda inimg, // ImageCuda outimg, // float *sum, // int *count, // int select // ); // Kernel _freckleFilterByStrMscKer // // // static __global__ void // Kernel _freckleFilterByStrMscKer( ImageCuda inimg, // Template radtpl, // Template archtpl, // float matchErrTh, // int length, // int radius, // float *sum, // int *count // ); // Kernel _freckleFilterByVarSumCountKer // static __global__ void _freckleFilterByVarSumCountKer( ImageCuda inimg, Template radtpl, Template archtpl, float varTh, float *sum, int *count) { // dstc dstr x y // dstc column dstr row // 4 4 // dstr 4 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // // if (dstc >= inimg.imgMeta.width || dstr >= inimg.imgMeta.height) return; // x y int dx, dy; // int *curtplptr; // unsigned char *curinptr; // int statistic[4] = { 0 , 0, 0, 0 }; // float m[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; // float mean[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; // float variance[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; int pix; // // curtplptr = archtpl.tplData; // for (int i = 0; i < archtpl.count; i++) { // x y // dx = dstc + *(curtplptr++); dy = dstr + *(curtplptr++); float temp; // // x , // 4 if (dx >= 0 && dx < inimg.imgMeta.width) { // dx dy curinptr = inimg.imgMeta.imgData + dx + dy * inimg.pitchBytes; // y if (dy >= 0 && dy < inimg.imgMeta.height) { // pix = *(curinptr); statistic[0]++; temp = pix - mean[0]; mean[0] += temp / statistic[0]; m[0] += temp * (pix - mean[0]); } // curinptr = curinptr + inimg.pitchBytes; dy++; // y if (dy >= 0 && dy < inimg.imgMeta.height) { // pix = *(curinptr); statistic[1]++; temp = pix - mean[1]; mean[1] += temp / statistic[1]; m[1] += temp * (pix - mean[1]); } // curinptr = curinptr + inimg.pitchBytes; dy++; // y if (dy >= 0 && dy < inimg.imgMeta.height) { // pix = *(curinptr); statistic[2]++; temp = pix - mean[2]; mean[2] += temp / statistic[2]; m[2] += temp * (pix - mean[2]); } // curinptr = curinptr + inimg.pitchBytes; dy++; // y if (dy >= 0 && dy < inimg.imgMeta.height) { // pix = *(curinptr); statistic[3]++; temp = pix - mean[3]; mean[3] += temp / statistic[3]; m[3] += temp * (pix - mean[3]); } } } // int index; // for(int i = 0; i < 4; i++) { // 0 if(statistic[i] == 0) continue; // variance[i] = m[i] / statistic[i]; // if (variance[i] < varTh) { // curtplptr = radtpl.tplData; // for (int j = 0; j < radtpl.count; j++) { // x y // dx = dstc + *(curtplptr++); dy = dstr + *(curtplptr++); // dx dy dy = dy + i; index = dx + dy * inimg.imgMeta.width; // // 1 if (dx >= 0 && dx < inimg.imgMeta.width && dy >= 0 && dy < inimg.imgMeta.height) { atomicAdd(&sum[index], mean[i]); 
atomicAdd(&count[index], 1); } } } } } // Kernel _freckleFilterSetPixelKer static __global__ void _freckleFilterSetPixelKer( ImageCuda inimg, ImageCuda outimg, float *sum, int *count, int select) { // dstc dstr x y // c column r row // 4 4 // dstr 4 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // // if (dstc >= outimg.imgMeta.width || dstr >= outimg.imgMeta.height) return; // int outidx = dstr * outimg.imgMeta.width + dstc; int out = dstr * outimg.pitchBytes + dstc; int temp; // float int // float int if (count[outidx] == 0) { // FRECKLE_OPEN // FRECKLE_CLOSE 0 if (select == FRECKLE_OPEN) temp = inimg.imgMeta.imgData[out]; else if (select == FRECKLE_CLOSE) temp = 0; } else { // temp = (int)(sum[outidx] / count[outidx] + 0.5f); } // outimg.imgMeta.imgData[out] = (unsigned char)temp; // for (int i = 0; i < 3; i++) { // x // y x // if (++dstr >= outimg.imgMeta.height) return; // y // 1 pitch // outidx += outimg.imgMeta.width; out += outimg.pitchBytes; // float int if (count[outidx] == 0) { // FRECKLE_OPEN // FRECKLE_CLOSE 0 if (select == FRECKLE_OPEN) temp = inimg.imgMeta.imgData[out]; else if (select == FRECKLE_CLOSE) temp = 0; } else { // temp = (int)(sum[outidx] / count[outidx] + 0.5f); } // outimg.imgMeta.imgData[out] = (unsigned char)temp; } } // Device _getMaxMatchValueDev length // static __device__ void _getMaxMatchValueDev( unsigned int *histogram1, unsigned int *histogram2, float &maxmatchvalue, int length, int hisnum) { // matchvalue float matchvalue = 0.0f; // // location int location = hisnum - length; for (int j = 0; j <= location; j++) { // unsigned int sum1 = { 0 }; unsigned int sum2 = { 0 }; unsigned int sum3 = { 0 }; unsigned int sum4 = { 0 }; unsigned int sum5 = { 0 }; // unsigned int tmp1, tmp2; // float m1, m2; // for (int k = 0; k < length; k++) { // tmp1 = *(histogram1 + j + k); tmp2 = *(histogram2 + j + k); // sum1 += tmp1; sum2 += tmp2; sum3 += tmp1 * tmp2; sum4 += tmp1 * tmp1; sum5 += tmp2 * tmp2; } // m1 = sqrtf((float)(length * sum4 - sum1 * sum1)); m2 = sqrtf((float)(length * sum5 - sum2 * sum2)); // if (m1 <= 0.000001f || m2 <= 0.000001f) matchvalue = 0.0f; else matchvalue = ((int)(length * sum3 - sum1 * sum2)) / (m1 * m2); // if (matchvalue > maxmatchvalue) { maxmatchvalue = matchvalue; } } } // Kernel _freckleFilterByStrMscKer // static __global__ void _freckleFilterByStrMscKer( ImageCuda inimg, Template radtpl, Template archtpl, float matchErrTh, int length, int radius, float *sum, int *count) { // dstc dstr x y // dstc column dstr row // 4 4 // dstr 4 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // if (dstc % radius != 0 || dstr % radius != 0 || dstc <= 0 || dstr <= 0 || dstc >= inimg.imgMeta.width - 1 || dstr >= inimg.imgMeta.height - 1) return; // x y int dx, dy; // int *curtplptr; // unsigned char *curinptr; // histogram1 unsigned int histogram1[256] = { 0 }; // histogram2 unsigned int histogram2[256] = { 0 }; // int statistic = 0; unsigned int pix; // // curtplptr = archtpl.tplData; // for (int i = 0; i < archtpl.count; i++) { // x y // dx = dstc + *(curtplptr++); dy = dstr + *(curtplptr++); // x y // 4 if (dx >= 0 && dx < inimg.imgMeta.width && dy >= 0 && dy < inimg.imgMeta.height) { // dx dy curinptr = inimg.imgMeta.imgData + dx + dy * inimg.pitchBytes; pix = *(curinptr); histogram1[pix]++; statistic++; } } // 0 if(statistic == 0) return; // curtplptr = radtpl.tplData; // for (int i = 0; i < radtpl.count; i++) { // x y // dx = dstc + 
*(curtplptr++); dy = dstr + *(curtplptr++); // x y // 4 if (dx >= 0 && dx < inimg.imgMeta.width) { // dx dy curinptr = inimg.imgMeta.imgData + dx + dy * inimg.pitchBytes; pix = *(curinptr); histogram2[pix]++; } } // float maxmatchvalue = 0.0f; // _getMaxMatchValueDev(histogram1, histogram2, maxmatchvalue, length, 256); // int index; // // 50% // if (1 - maxmatchvalue > matchErrTh) { // 50% float mean; // int lownum = (int)(statistic * 0.25f + 0.5f); // int highnum = (int)(statistic * 0.25f + 0.5f); // int lowcount = 0, highcount = 0; // int lowvalue = 0, highvalue = 0; // bool lowmask = false, highmask = false; // int lowindex = 0, highindex = 0; for (int k = 0; k < 256; k++) { // lowcount += histogram1[k]; if (!lowmask && lowcount >= lownum) { lowindex = k + 1; lowvalue = (lowcount - lownum) * k; lowmask = true; } // int high = 255 - k; // highcount += histogram1[high]; if (!highmask && highcount >= highnum) { highindex = high - 1; highvalue = (highcount - highnum) * high; highmask = true; } // if (lowmask && highmask) break; } // lowindex highindex if (lowindex > highindex) return; // float tmpsum = (float)(lowvalue + highvalue); for (int k = lowindex; k <= highindex; k++) tmpsum += k * histogram1[k]; // mean = tmpsum / (statistic - lownum - highnum); // curtplptr = radtpl.tplData; // for (int j = 0; j < radtpl.count; j++) { // x y // dx = dstc + *(curtplptr++); dy = dstr + *(curtplptr++); // dx dy dy++; index = dx + dy * inimg.imgMeta.width; // // 1 if (dx >= 0 && dx < inimg.imgMeta.width && dy >= 0 && dy < inimg.imgMeta.height) { atomicAdd(&sum[index], mean); atomicAdd(&count[index], 1); } } } } // Host freckleFilter __host__ int FreckleFilter::freckleFilter(Image *inimg, Image *outimg) { // NULL NULL if (inimg == NULL || outimg == NULL) return NULL_POINTER; // if (select != FRECKLE_OPEN && select != FRECKLE_CLOSE) return INVALID_DATA; int errcode; // // Device errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // Device errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // // ROI errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // if (errcode != NO_ERROR) return errcode; } // ROI ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // ROI ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // radtpl Template *radtpl; // dim3 radsize(this->radius * 2 + 1, this->radius * 2 + 1, 1); // errcode = TemplateFactory::getTemplate(&radtpl, TF_SHAPE_CIRCLE, radsize, NULL); // NULL NULL if (errcode != NO_ERROR) return errcode; // Device errcode = TemplateBasicOp::copyToCurrentDevice(radtpl); if (errcode != NO_ERROR) { // radtpl TemplateFactory::putTemplate(radtpl); return errcode; } // archtpl Template *archtpl; // dim3 arcsize(this->radius * 2 + 1, (this->radius + 4) * 2 + 1, 1); // errcode = TemplateFactory::getTemplate(&archtpl, TF_SHAPE_ARC, arcsize, NULL); // NULL NULL if (errcode != NO_ERROR) { // radtpl TemplateFactory::putTemplate(radtpl); return errcode; } // 
Device errcode = TemplateBasicOp::copyToCurrentDevice(archtpl); if (errcode != NO_ERROR) { // TemplateFactory::putTemplate(radtpl); TemplateFactory::putTemplate(archtpl); return errcode; } // Kernel dim3 blocksize, gridsize1, gridsize2; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize1.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize1.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); gridsize2.x = gridsize1.x; gridsize2.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; // size_t datasize = outsubimgCud.imgMeta.width * outsubimgCud.imgMeta.height; hipError_t cuerrcode; // CUDA // sum outsubimgCud.imgMeta.width * // outsubimgCud.imgMeta.height float // float *sum; // count outsubimgCud.imgMeta.width * // outsubimgCud.imgMeta.height int // int *count; // void *temp_dev; // cuerrcode = hipMalloc( (void **)&temp_dev, datasize * sizeof (float) + datasize * sizeof (int)); if (cuerrcode != hipSuccess) { // TemplateFactory::putTemplate(radtpl); TemplateFactory::putTemplate(archtpl); return CUDA_ERROR; } // sum = (float *)temp_dev; count = (int *)(sum + datasize); // 0 cuerrcode = hipMemset(sum, 0, datasize * sizeof (float)); if (cuerrcode != hipSuccess) { // TemplateFactory::putTemplate(radtpl); TemplateFactory::putTemplate(archtpl); // hipFree(temp_dev); return CUDA_ERROR; } // 0 cuerrcode = hipMemset(count, 0, datasize * sizeof (int)); if (cuerrcode != hipSuccess) { // TemplateFactory::putTemplate(radtpl); TemplateFactory::putTemplate(archtpl); // hipFree(temp_dev); return CUDA_ERROR; } if (method == FRECKLE_VAR_TH) { // Kernel // hipLaunchKernelGGL(( _freckleFilterByVarSumCountKer), dim3(gridsize1), dim3(blocksize), 0, 0, insubimgCud, *radtpl, *archtpl, this->varTh, sum, count); } else if (method == FRECKLE_MATCH_ERRTH) { // Kernel // hipLaunchKernelGGL(( _freckleFilterByStrMscKer), dim3(gridsize2), dim3(blocksize), 0, 0, insubimgCud, *radtpl, *archtpl, this->matchErrTh, this->length, this->radius, sum, count); } else { // method return INVALID_DATA; } // if (hipGetLastError() != hipSuccess) { // TemplateFactory::putTemplate(radtpl); TemplateFactory::putTemplate(archtpl); // hipFree(temp_dev); return CUDA_ERROR; } // TemplateFactory::putTemplate(radtpl); TemplateFactory::putTemplate(archtpl); // Kernel hipLaunchKernelGGL(( _freckleFilterSetPixelKer), dim3(gridsize1), dim3(blocksize), 0, 0, insubimgCud, outsubimgCud, sum, count, this->select); // if (hipGetLastError() != hipSuccess) { // hipFree(temp_dev); return CUDA_ERROR; } // hipFree(temp_dev); // return NO_ERROR; }
52549beb052ddd6e284d369b49061b33d2ea9722.cu
// FreckleFilter // 实现广义的中值滤波 #include "FreckleFilter.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; #include "ErrorCode.h" #include "Template.h" #include "TemplateFactory.h" // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // Device 函数:_getMaxMatchValueDev(得到两个直方图的 length 长度 // 相似度匹配最大值) // 对圆周和园内两直方图进行长度为 length 的相似度匹配,返回最大值 static __device__ void // 返回值:无返回值 _getMaxMatchValueDev( unsigned int *histogram1, // 圆周上的直方图 unsigned int *histogram2, // 圆周上的直方图 float &maxmatchvalue, // 像素点对应的最大匹配值指针 int length, // 相似度匹配的长度参数 int hisnum = 256 // 直方图的数组大小,本方法大小为 256 ); // Kernel 函数:_freckleFilterByVarSumCountKer(获得输出图像的每点像素平均值总 // 和与累加次数算法操作) // 根据方差阈值大小,得到输出图像的每点像素平均值总和与累加次数算法操作 static __global__ void // Kernel 函数无返回值 _freckleFilterByVarSumCountKer( ImageCuda inimg, // 输入图像 Template radtpl, // 圆形模板,用于指定圆内领域 Template archtpl, // 环形模板,用于指定圆周的邻域 float varTh, // 外部指定的方差阈值 float *sum, // 像素平均值累加总和 int *count // 像素平均值累加次数 ); // Kernel 函数:_freckleFilterPixelKer(实现给输出图像设定像素值算法操作) // 根据每点像素累加总和与累加次数,给输出图像设定像素平均值 static __global__ void // Kernel 函数无返回值 _freckleFilterSetPixelKer( ImageCuda inimg, // 输入图像 ImageCuda outimg, // 输出图像 float *sum, // 像素平均值累加总和 int *count, // 像素平均值累加次数 int select // 最后赋值时的选择参数 ); // Kernel 函数:_freckleFilterByStrMscKer(获得输出图像的每点像素平均值总 // 和与累加次数算法操作) // 通过相似度匹配,根据匹配差阈值,得到输出图像的每点像素平均值总和与 // 累加次数算法操作 static __global__ void // Kernel 函数无返回值 _freckleFilterByStrMscKer( ImageCuda inimg, // 输入图像 Template radtpl, // 圆形模板,用于指定圆内领域 Template archtpl, // 环形模板,用于指定圆周的邻域 float matchErrTh, // 外部指定的匹配差阈值 int length, // 相似度匹配的长度参数 int radius, // 圆领域的半径 float *sum, // 像素平均值累加总和 int *count // 像素平均值累加次数 ); // Kernel 函数:_freckleFilterByVarSumCountKer(实现给输出图像设定像素值算法 // 操作) static __global__ void _freckleFilterByVarSumCountKer( ImageCuda inimg, Template radtpl, Template archtpl, float varTh, float *sum, int *count) { // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 // dstc 表示 column, dstr 表示 row)。由于采用并行度缩减策略 ,令一个线程 // 处理 4 个输出像素,这四个像素位于统一列的相邻 4 行上,因此,对于 // dstr 需要进行乘 4 的计算 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, // 另一方面防止由于段错误导致系统崩溃 if (dstc >= inimg.imgMeta.width || dstr >= inimg.imgMeta.height) return; // 用来保存临时像素点的坐标的 x 和 y 分量 int dx, dy; // 用来记录当前模版所在位置的指针 int *curtplptr; // 用来记录当前输入图像所在位置的指针 unsigned char *curinptr; // 计数器,用来记录某点在模版范围内拥有的点的个数 int statistic[4] = { 0 , 0, 0, 0 }; // 迭代求平均值和方差使用的中间值 float m[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; // 计算得到的平均值 float mean[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; // 计算得到的拱圆模板领域方差 float variance[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; int pix; // 局部变量,临时存储像素值 // 指定当前环形模版所在位置 curtplptr = archtpl.tplData; // 扫描环形模版范围内的每个输入图像的像素点 for (int i = 0; i < archtpl.count; i++) { // 计算当模版位置所在像素的 x 和 y 分量,模版使用相邻的两个下标的 // 数组表示一个点,所以使用当前模版位置的指针加一操作 dx = dstc + *(curtplptr++); dy = dstr + *(curtplptr++); float temp; // 局部变量,在进行迭代时的中间变量 // 先判断当前像素的 x 分量是否越界,如果越界,则跳过,扫描下一个模版点, // 如果没有越界,则分别处理当前列的相邻的 4 个像素 if (dx >= 0 && dx < inimg.imgMeta.width) { // 根据 dx 和 dy 获取第一个像素的指针 curinptr = inimg.imgMeta.imgData + dx + dy * inimg.pitchBytes; // 检测此像素点的 y 分量是否越界 if (dy >= 0 && dy < inimg.imgMeta.height) { // 对第一个点进行迭代 pix = *(curinptr); statistic[0]++; temp = pix - mean[0]; mean[0] += temp / statistic[0]; m[0] += temp * (pix - mean[0]); } // 获取第二个像素点的指针 curinptr = curinptr + inimg.pitchBytes; dy++; // 检测第二个像素点的 y 分量是否越界 if (dy >= 0 && dy < inimg.imgMeta.height) { // 对第二个点进行迭代 pix = *(curinptr); statistic[1]++; temp = pix - mean[1]; 
mean[1] += temp / statistic[1]; m[1] += temp * (pix - mean[1]); } // 获取第三个像素点的指针 curinptr = curinptr + inimg.pitchBytes; dy++; // 检测第三个像素点的 y 分量是否越界 if (dy >= 0 && dy < inimg.imgMeta.height) { // 对第三个点进行迭代 pix = *(curinptr); statistic[2]++; temp = pix - mean[2]; mean[2] += temp / statistic[2]; m[2] += temp * (pix - mean[2]); } // 获取第四个像素点的指针 curinptr = curinptr + inimg.pitchBytes; dy++; // 检测第四个像素点的 y 分量是否越界 if (dy >= 0 && dy < inimg.imgMeta.height) { // 对第四个点进行迭代 pix = *(curinptr); statistic[3]++; temp = pix - mean[3]; mean[3] += temp / statistic[3]; m[3] += temp * (pix - mean[3]); } } } // 计算输出坐标点对应的图像数据数组下标。 int index; // 对每个像素点求圆周上点的方差大小,根据方差与阈值大小给输出点累加和 for(int i = 0; i < 4; i++) { // 如果圆周领域内的的点个数为 0,则判断下一个像素点 if(statistic[i] == 0) continue; // 计算环形模板领域的方差 variance[i] = m[i] / statistic[i]; // 如果方差小于给定阈值,则对圆形模板里的所有点赋平均值 if (variance[i] < varTh) { // 指定当前圆形模版所在位置 curtplptr = radtpl.tplData; // 扫描圆形模版范围内的每个输入图像的像素点 for (int j = 0; j < radtpl.count; j++) { // 计算当模版位置所在像素的 x 和 y 分量,模版使用相邻的两个 // 下标的数组表示一个点,所以使用当前模版位置的指针加一操作 dx = dstc + *(curtplptr++); dy = dstr + *(curtplptr++); // 根据 dx 和 dy 获取像素下标 dy = dy + i; index = dx + dy * inimg.imgMeta.width; // 如果没有越界,则分别处理当前列的相邻的符合条件的像素 // 给累加和累加平均值,累加次数相应加 1 if (dx >= 0 && dx < inimg.imgMeta.width && dy >= 0 && dy < inimg.imgMeta.height) { atomicAdd(&sum[index], mean[i]); atomicAdd(&count[index], 1); } } } } } // Kernel 函数:_freckleFilterSetPixelKer(实现给输出图像设定像素值算法操作) static __global__ void _freckleFilterSetPixelKer( ImageCuda inimg, ImageCuda outimg, float *sum, int *count, int select) { // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中, // c 表示 column, r 表示 row)。由于采用并行度缩减策略 ,令一个线程 // 处理 4 个输出像素,这四个像素位于统一列的相邻 4 行上,因此,对于 // dstr 需要进行乘 4 的计算 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算 // 资源,另一方面防止由于段错误导致程序崩溃 if (dstc >= outimg.imgMeta.width || dstr >= outimg.imgMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int outidx = dstr * outimg.imgMeta.width + dstc; int out = dstr * outimg.pitchBytes + dstc; int temp; // 临时变量用于 float 型数据转 int 型,需要四舍五入 // 计算每一个点的像素平均值,并且四舍五入 float 转 int 型 if (count[outidx] == 0) { // 如果该点没有被累加和,如果为 FRECKLE_OPEN 则应该赋值为 // 原图像对应灰度值,如果为 FRECKLE_CLOSE,则赋值为 0 if (select == FRECKLE_OPEN) temp = inimg.imgMeta.imgData[out]; else if (select == FRECKLE_CLOSE) temp = 0; } else { // 如果被累加和,则按以下方式求像素平均值并按要求处理 temp = (int)(sum[outidx] / count[outidx] + 0.5f); } // 对图像每点像素值赋上对应值 outimg.imgMeta.imgData[out] = (unsigned char)temp; // 处理剩下的三个像素点。 for (int i = 0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++dstr >= outimg.imgMeta.height) return; // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计 // 算。 outidx += outimg.imgMeta.width; out += outimg.pitchBytes; // 计算每一个点的像素平均值,并且四舍五入 float 转 int 型 if (count[outidx] == 0) { // 如果该点没有被累加和,如果为 FRECKLE_OPEN 则应该赋值为 // 原图像对应灰度值,如果为 FRECKLE_CLOSE,则赋值为 0 if (select == FRECKLE_OPEN) temp = inimg.imgMeta.imgData[out]; else if (select == FRECKLE_CLOSE) temp = 0; } else { // 如果被累加和,则按以下方式求像素平均值并按要求处理 temp = (int)(sum[outidx] / count[outidx] + 0.5f); } // 对图像每点像素值赋上对应值 outimg.imgMeta.imgData[out] = (unsigned char)temp; } } // Device 函数:_getMaxMatchValueDev(得到两个直方图的 length 长度 // 相似度匹配最大值) static __device__ void _getMaxMatchValueDev( unsigned int *histogram1, unsigned int *histogram2, float &maxmatchvalue, int length, int hisnum) { // 临时变量 matchvalue,存储匹配的结果值 float matchvalue = 0.0f; // 从左端开始匹配 // 临时变量 location,用于定位匹配最右位置 int 
location = hisnum - length; for (int j = 0; j <= location; j++) { // 临时变量,存储计算相关系数的和 unsigned int sum1 = { 0 }; unsigned int sum2 = { 0 }; unsigned int sum3 = { 0 }; unsigned int sum4 = { 0 }; unsigned int sum5 = { 0 }; // 临时变量,存储获得数组对应值 unsigned int tmp1, tmp2; // 临时变量,存储计算相关系数算法的分母 float m1, m2; // 计算相似度需要用到的临时变量 for (int k = 0; k < length; k++) { // 取得对应直方图值 tmp1 = *(histogram1 + j + k); tmp2 = *(histogram2 + j + k); // 计算相似度要用到的累加和 sum1 += tmp1; sum2 += tmp2; sum3 += tmp1 * tmp2; sum4 += tmp1 * tmp1; sum5 += tmp2 * tmp2; } // 计算相似度的分母临时变量 m1 = sqrtf((float)(length * sum4 - sum1 * sum1)); m2 = sqrtf((float)(length * sum5 - sum2 * sum2)); // 计算匹配的相似度 if (m1 <= 0.000001f || m2 <= 0.000001f) matchvalue = 0.0f; else matchvalue = ((int)(length * sum3 - sum1 * sum2)) / (m1 * m2); // 取相似度最大值 if (matchvalue > maxmatchvalue) { maxmatchvalue = matchvalue; } } } // Kernel 函数:_freckleFilterByStrMscKer(实现 // 给输出图像设定像素值算法操作) static __global__ void _freckleFilterByStrMscKer( ImageCuda inimg, Template radtpl, Template archtpl, float matchErrTh, int length, int radius, float *sum, int *count) { // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 // dstc 表示 column, dstr 表示 row)。由于采用并行度缩减策略 ,令一个线程 // 处理 4 个输出像素,这四个像素位于统一列的相邻 4 行上,因此,对于 // dstr 需要进行乘 4 的计算 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否符合作为圆心的条件,若不符,则不进行处理 if (dstc % radius != 0 || dstr % radius != 0 || dstc <= 0 || dstr <= 0 || dstc >= inimg.imgMeta.width - 1 || dstr >= inimg.imgMeta.height - 1) return; // 用来保存临时像素点的坐标的 x 和 y 分量 int dx, dy; // 用来记录当前模版所在位置的指针 int *curtplptr; // 用来记录当前输入图像所在位置的指针 unsigned char *curinptr; // 圆周上的图像直方图 histogram1 unsigned int histogram1[256] = { 0 }; // 圆内的图像直方图 histogram2 unsigned int histogram2[256] = { 0 }; // 计数器,用来记录某点在圆周上和园内拥有的点的个数 int statistic = 0; unsigned int pix; // 局部变量,临时存储像素值 // 指定当前环形模版所在位置 curtplptr = archtpl.tplData; // 扫描环形模版范围内的每个输入图像的像素点 for (int i = 0; i < archtpl.count; i++) { // 计算当模版位置所在像素的 x 和 y 分量,模版使用相邻的两个下标的 // 数组表示一个点,所以使用当前模版位置的指针加一操作 dx = dstc + *(curtplptr++); dy = dstr + *(curtplptr++); // 先判断当前像素的 x 分量,y 分量是否越界,如果越界,则跳过,扫描 // 下一个模版点,如果没有越界,则分别处理当前列的相邻的 4 个像素 if (dx >= 0 && dx < inimg.imgMeta.width && dy >= 0 && dy < inimg.imgMeta.height) { // 根据 dx 和 dy 获取像素的指针 curinptr = inimg.imgMeta.imgData + dx + dy * inimg.pitchBytes; pix = *(curinptr); histogram1[pix]++; statistic++; } } // 如果圆周领域内的的点个数为 0 这直接返回 if(statistic == 0) return; // 指定当前圆形模版所在位置 curtplptr = radtpl.tplData; // 扫描环形模版范围内的每个输入图像的像素点 for (int i = 0; i < radtpl.count; i++) { // 计算当模版位置所在像素的 x 和 y 分量,模版使用相邻的两个下标的 // 数组表示一个点,所以使用当前模版位置的指针加一操作 dx = dstc + *(curtplptr++); dy = dstr + *(curtplptr++); // 先判断当前像素的 x 分量,y 分量是否越界,如果越界,则跳过,扫描 // 下一个模版点,如果没有越界,则分别处理当前列的相邻的 4 个像素 if (dx >= 0 && dx < inimg.imgMeta.width) { // 根据 dx 和 dy 获取第一个像素的指针 curinptr = inimg.imgMeta.imgData + dx + dy * inimg.pitchBytes; pix = *(curinptr); histogram2[pix]++; } } // 存储以四个像素圆心得到两直方图的匹配最大值 float maxmatchvalue = 0.0f; // 得到四个像素的两直方图的匹配最大值 _getMaxMatchValueDev(histogram1, histogram2, maxmatchvalue, length, 256); // 计算输出坐标点对应的图像数据数组下标。 int index; // 根据匹配差与阈值大小对符合条件像素点对其圆周上点进行排序, // 取中间 50% 灰度平均,给输出点累加和累加赋值 // 如果匹配差大于给定阈值,则对圆形模板里的所有点赋平均值 if (1 - maxmatchvalue > matchErrTh) { // 存储圆周上的图像值的中值平均(取排序后中间 50% 平均) float mean; // 去掉排序结果中前端的数量 int lownum = (int)(statistic * 0.25f + 0.5f); // 去掉排序结果中末端端的数量 int highnum = (int)(statistic * 0.25f + 0.5f); // 对直方图前后端个数统计 int lowcount = 0, highcount = 0; // 在前后端统计时,中间段少加的值 int lowvalue = 0, highvalue = 0; // 前后端统计时的开关 bool lowmask = false, highmask = false; 
// 直方图中间段的两端索引 int lowindex = 0, highindex = 0; for (int k = 0; k < 256; k++) { // 计算直方图前端的个数 lowcount += histogram1[k]; if (!lowmask && lowcount >= lownum) { lowindex = k + 1; lowvalue = (lowcount - lownum) * k; lowmask = true; } // 直方图后端的循环索引 int high = 255 - k; // 计算直方图后端的个数 highcount += histogram1[high]; if (!highmask && highcount >= highnum) { highindex = high - 1; highvalue = (highcount - highnum) * high; highmask = true; } // 如果前后端开关都打开,表示都找到了对应位置,就退出循环 if (lowmask && highmask) break; } // 如果 lowindex 大于 highindex,表示没有要处理的元素,则返回 if (lowindex > highindex) return; // 计算领域内的像素值总和 float tmpsum = (float)(lowvalue + highvalue); for (int k = lowindex; k <= highindex; k++) tmpsum += k * histogram1[k]; // 计算平均值 mean = tmpsum / (statistic - lownum - highnum); // 指定当前圆形模版所在位置 curtplptr = radtpl.tplData; // 扫描圆形模版范围内的每个输入图像的像素点 for (int j = 0; j < radtpl.count; j++) { // 计算当模版位置所在像素的 x 和 y 分量,模版使用相邻的两个 // 下标的数组表示一个点,所以使用当前模版位置的指针加一操作 dx = dstc + *(curtplptr++); dy = dstr + *(curtplptr++); // 根据 dx 和 dy 获取像素下标 dy++; index = dx + dy * inimg.imgMeta.width; // 如果没有越界,则分别处理当前列的相邻的符合条件的像素 // 给累加和累加平均值,累加次数相应加 1 if (dx >= 0 && dx < inimg.imgMeta.width && dy >= 0 && dy < inimg.imgMeta.height) { atomicAdd(&sum[index], mean); atomicAdd(&count[index], 1); } } } } // Host 成员方法:freckleFilter(广义的中值滤波) __host__ int FreckleFilter::freckleFilter(Image *inimg, Image *outimg) { // 检查输入图像是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL || outimg == NULL) return NULL_POINTER; // 开关错误检查,如果既不是开选择也不是闭选择,则返回错误 if (select != FRECKLE_OPEN && select != FRECKLE_CLOSE) return INVALID_DATA; int errcode; // 局部变量,错误码 // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和输入图 // 像的 ROI 子图像尺寸相同的图像。 errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // 如果创建图像也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // 根据子图像的大小对长,宽进行调整,选择长度小的长,宽进行子图像的统一 if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // 定义模板 radtpl 用于获取圆形领域模板 Template *radtpl; // 定义圆形模板的尺寸 dim3 radsize(this->radius * 2 + 1, this->radius * 2 + 1, 1); // 通过模板工厂得到圆形领域模板 errcode = TemplateFactory::getTemplate(&radtpl, TF_SHAPE_CIRCLE, radsize, NULL); // 检查圆形模板是否为 NULL,如果为 NULL 直接报错返回。 if (errcode != NO_ERROR) return errcode; // 将模板拷贝到 Device 内存中 errcode = TemplateBasicOp::copyToCurrentDevice(radtpl); if (errcode != NO_ERROR) { // 放回 radtpl 模板 TemplateFactory::putTemplate(radtpl); return errcode; } // 定义模板 archtpl 用于获取环形领域模板 Template *archtpl; // 定义环形模板的尺寸 dim3 arcsize(this->radius * 2 + 1, (this->radius + 4) * 2 + 1, 1); // 得到环形领域模板 errcode = TemplateFactory::getTemplate(&archtpl, TF_SHAPE_ARC, arcsize, NULL); // 检查环形模板是否为 NULL,如果为 NULL 报错返回。 if (errcode != NO_ERROR) { // 放回 radtpl 模板 TemplateFactory::putTemplate(radtpl); 
return errcode; } // 将模板拷贝到 Device 内存中 errcode = TemplateBasicOp::copyToCurrentDevice(archtpl); if (errcode != NO_ERROR) { // 放回模板 TemplateFactory::putTemplate(radtpl); TemplateFactory::putTemplate(archtpl); return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize1, gridsize2; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize1.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize1.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); gridsize2.x = gridsize1.x; gridsize2.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; // 得到要处理的像素总个数 size_t datasize = outsubimgCud.imgMeta.width * outsubimgCud.imgMeta.height; cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 定义 sum 全局变量指针,申请一个 outsubimgCud.imgMeta.width * // outsubimgCud.imgMeta.height 的 float 型数组,用于存储每点像素平均值累加 // 总和。 float *sum; // 定义 count 全局变量指针,申请一个 outsubimgCud.imgMeta.width * // outsubimgCud.imgMeta.height 的 int 型数组,用于存储每点像素平均值累加 // 次数。 int *count; // 定义局部变量,用于多份数据的一份申请 void *temp_dev; // 在设备端申请内存,然后分配给各个变量 cuerrcode = cudaMalloc( (void **)&temp_dev, datasize * sizeof (float) + datasize * sizeof (int)); if (cuerrcode != cudaSuccess) { // 放回模板 TemplateFactory::putTemplate(radtpl); TemplateFactory::putTemplate(archtpl); return CUDA_ERROR; } // 为变量分配内存 sum = (float *)temp_dev; count = (int *)(sum + datasize); // 初始化累加和的所有值为 0 cuerrcode = cudaMemset(sum, 0, datasize * sizeof (float)); if (cuerrcode != cudaSuccess) { // 放回模板 TemplateFactory::putTemplate(radtpl); TemplateFactory::putTemplate(archtpl); // 释放累加和与累加次数的总空间 cudaFree(temp_dev); return CUDA_ERROR; } // 初始化累加次数的所有值为 0 cuerrcode = cudaMemset(count, 0, datasize * sizeof (int)); if (cuerrcode != cudaSuccess) { // 放回模板 TemplateFactory::putTemplate(radtpl); TemplateFactory::putTemplate(archtpl); // 释放累加和与累加次数的总空间 cudaFree(temp_dev); return CUDA_ERROR; } if (method == FRECKLE_VAR_TH) { // 若方法为方差阈值法,则调用相应方差阈值法的 Kernel 获得 // 输出图像的每点像素平均值累加总和与累加次数。 _freckleFilterByVarSumCountKer<<<gridsize1, blocksize>>>( insubimgCud, *radtpl, *archtpl, this->varTh, sum, count); } else if (method == FRECKLE_MATCH_ERRTH) { // 若方法为相似度匹配法,则调用相应相似度匹配法的 Kernel 获得 // 输出图像的每点像素平均值累加总和与累加次数。 _freckleFilterByStrMscKer<<<gridsize2, blocksize>>>( insubimgCud, *radtpl, *archtpl, this->matchErrTh, this->length, this->radius, sum, count); } else { // method 错误检查,进入这条分支表示没有外部方法设置有误 return INVALID_DATA; } // 检查核函数运行是否出错 if (cudaGetLastError() != cudaSuccess) { // 放回模板 TemplateFactory::putTemplate(radtpl); TemplateFactory::putTemplate(archtpl); // 释放累加和与累加次数的总空间 cudaFree(temp_dev); return CUDA_ERROR; } // 放回模板 TemplateFactory::putTemplate(radtpl); TemplateFactory::putTemplate(archtpl); // 调用 Kernel 函数实现给输出图像设定像素值。 _freckleFilterSetPixelKer<<<gridsize1, blocksize>>>( insubimgCud, outsubimgCud, sum, count, this->select); // 检查核函数运行是否出错 if (cudaGetLastError() != cudaSuccess) { // 释放累加和与累加次数的总空间 cudaFree(temp_dev); return CUDA_ERROR; } // 释放累加和与累加次数的总空间 cudaFree(temp_dev); // 处理完毕,退出。 return NO_ERROR; }
bb59a964c5ce69e9b345ae9b8a38426e66400128.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Header2.hpp"

using namespace CR2;

__global__ void CR2::addKernel(int *c, const int *a, const int *b)
{
    hipStream_t stream;
    hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
    hipDeviceSynchronize();
}
bb59a964c5ce69e9b345ae9b8a38426e66400128.cu
 #include "Header2.hpp" using namespace CR2; __global__ void CR2::addKernel(int *c, const int *a, const int *b) { cudaStream_t stream; cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking); int i = threadIdx.x; c[i] = a[i] + b[i]; cudaDeviceSynchronize(); }
d7b436dd43ffb5c8cdd3e42fa518182c849d4469.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include <utilities.h> #include <serial.h> #include <nvidia.h> #include <scanTrans.h> using namespace std; // Global variables int *serialCscRowIdx; int *serialCscColPtr; double *serialCscVal; float serialTime; // Function prototypes void serialAlgo(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal); void nvidiaAlgo1(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal); void nvidiaAlgo2(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal); void scanTrans(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal); bool checkAllResults( int *scanTransCscRowIdx, int *scanTransCscColPtr, double *scanTransCscVal,int n, int nnz); bool checkResultsIsWrong(int m, int *arrayA, int *arrayB); bool checkResultsIsWrong(int m, double *arrayA, double *arrayB); int main(int argc, char **argv) { char *filename = detectFile(argc, argv[1]); int m; int n; int nnz; int *csrRowPtr; int *csrColIdx; double *csrVal; readMatrix(filename, m, n, nnz, csrRowPtr, csrColIdx, csrVal); // Launch of the various algorithms serialAlgo(m, n, nnz, csrRowPtr, csrColIdx, csrVal); cout << endl; nvidiaAlgo1(m, n, nnz, csrRowPtr, csrColIdx, csrVal); cout << endl; nvidiaAlgo2(m, n, nnz, csrRowPtr, csrColIdx, csrVal); cout << endl; scanTrans(m, n, nnz, csrRowPtr, csrColIdx, csrVal); cout << endl; // Cleaning free(csrRowPtr); free(csrColIdx); free(csrVal); free(serialCscRowIdx); free(serialCscColPtr); free(serialCscVal); return 0; } void serialAlgo(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal) { serialCscRowIdx = (int *)malloc(nnz * sizeof(int)); serialCscColPtr = (int *)malloc((n + 1) * sizeof(int)); serialCscVal = (double *)malloc(nnz * sizeof(double)); serialTime = performTransposition( serial, m, n, nnz, csrRowPtr, csrColIdx, csrVal, serialCscColPtr, serialCscRowIdx, serialCscVal ); } void nvidiaAlgo1(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal) { int *nvidiaAlgo1CscRowIdx = (int *)malloc(nnz * sizeof(int)); int *nvidiaAlgo1CscColPtr = (int *)malloc((n + 1) * sizeof(int)); double *nvidiaAlgo1CscVal = (double *)malloc(nnz * sizeof(double)); float nvidiaAlgo1Time = performTransposition( nvidia, m, n, nnz, csrRowPtr, csrColIdx, csrVal, nvidiaAlgo1CscColPtr, nvidiaAlgo1CscRowIdx, nvidiaAlgo1CscVal ); if(nvidiaAlgo1Time == -1) { cout << "GPU Sparse Matrix Transpostion ALGO1: memory is too low" << endl; cout << "ALGO1 speedup: -" << endl; } else { cout << setprecision(1) << "ALGO1 speedup: " << serialTime / nvidiaAlgo1Time << "x" << endl; } // Cleaning device hipDeviceReset(); free(nvidiaAlgo1CscRowIdx); free(nvidiaAlgo1CscColPtr); free(nvidiaAlgo1CscVal); } void nvidiaAlgo2(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal) { int *nvidiaAlgo2CscRowIdx = (int *)malloc(nnz * sizeof(int)); int *nvidiaAlgo2CscColPtr = (int *)malloc((n + 1) * sizeof(int)); double *nvidiaAlgo2CscVal = (double *)malloc(nnz * sizeof(double)); float nvidiaAlgo2Time = performTransposition( nvidia2, m, n, nnz, csrRowPtr, csrColIdx, csrVal, nvidiaAlgo2CscColPtr, nvidiaAlgo2CscRowIdx, nvidiaAlgo2CscVal ); if(nvidiaAlgo2Time == -1) { cout << "GPU Sparse Matrix Transpostion ALGO2: memory is too low" << endl; cout << "ALGO2 speedup: -" << endl; } else { cout << setprecision(1) << "ALGO2 speedup: " << serialTime / nvidiaAlgo2Time << "x" << endl; } // Cleaning device hipDeviceReset(); free(nvidiaAlgo2CscColPtr); 
free(nvidiaAlgo2CscRowIdx); free(nvidiaAlgo2CscVal); } void scanTrans(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal) { int *cscRowIdx = (int *)malloc(nnz * sizeof(int)); int *cscColPtr = (int *)malloc((n + 1) * sizeof(int)); double *cscVal = (double *)malloc(nnz * sizeof(double)); float scanTransTime = performTransposition( scanTrans, m, n, nnz, csrRowPtr, csrColIdx, csrVal, cscColPtr, cscRowIdx, cscVal ); if(scanTransTime == -1) { cout << "GPU Sparse Matrix Transpostion ScanTrans: memory is too low" << endl; cout << "ScanTrans wrong: -" << endl; cout << "ScanTrans speedup: -" << endl; } else { bool isWrong = checkAllResults( cscRowIdx, cscColPtr, cscVal, n, nnz ); cout << "ScanTrans wrong: " << isWrong << endl; if(isWrong) exit(EXIT_FAILURE); else cout << setprecision(1) << "ScanTrans speedup: " << serialTime / scanTransTime << "x" << endl; } // Cleaning device hipDeviceReset(); free(cscRowIdx); free(cscColPtr); free(cscVal); } bool checkAllResults( int *scanTransCscRowIdx, int *scanTransCscColPtr, double *scanTransCscVal, int n, int nnz ) { bool isWrong = false; isWrong = checkResultsIsWrong(n + 1, serialCscColPtr, scanTransCscColPtr); if(isWrong) return isWrong; isWrong = checkResultsIsWrong(nnz, serialCscRowIdx, scanTransCscRowIdx); if(isWrong) return isWrong; isWrong = checkResultsIsWrong(nnz, serialCscVal, scanTransCscVal); if(isWrong) return isWrong; } bool checkResultsIsWrong(int m, int *arrayA, int *arrayB) { for (int i = 0; i < m; i++) { if (arrayA[i] != arrayB[i]) { hipDeviceReset(); return true; } } return false; } bool checkResultsIsWrong(int m, double *arrayA, double *arrayB) { for (int i = 0; i < m; i++) { if (arrayA[i] != arrayB[i]) { hipDeviceReset(); return true; } } return false; }
d7b436dd43ffb5c8cdd3e42fa518182c849d4469.cu
#include <iostream> #include <iomanip> #include <utilities.h> #include <serial.h> #include <nvidia.h> #include <scanTrans.h> using namespace std; // Global variables int *serialCscRowIdx; int *serialCscColPtr; double *serialCscVal; float serialTime; // Function prototypes void serialAlgo(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal); void nvidiaAlgo1(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal); void nvidiaAlgo2(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal); void scanTrans(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal); bool checkAllResults( int *scanTransCscRowIdx, int *scanTransCscColPtr, double *scanTransCscVal,int n, int nnz); bool checkResultsIsWrong(int m, int *arrayA, int *arrayB); bool checkResultsIsWrong(int m, double *arrayA, double *arrayB); int main(int argc, char **argv) { char *filename = detectFile(argc, argv[1]); int m; int n; int nnz; int *csrRowPtr; int *csrColIdx; double *csrVal; readMatrix(filename, m, n, nnz, csrRowPtr, csrColIdx, csrVal); // Launch of the various algorithms serialAlgo(m, n, nnz, csrRowPtr, csrColIdx, csrVal); cout << endl; nvidiaAlgo1(m, n, nnz, csrRowPtr, csrColIdx, csrVal); cout << endl; nvidiaAlgo2(m, n, nnz, csrRowPtr, csrColIdx, csrVal); cout << endl; scanTrans(m, n, nnz, csrRowPtr, csrColIdx, csrVal); cout << endl; // Cleaning free(csrRowPtr); free(csrColIdx); free(csrVal); free(serialCscRowIdx); free(serialCscColPtr); free(serialCscVal); return 0; } void serialAlgo(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal) { serialCscRowIdx = (int *)malloc(nnz * sizeof(int)); serialCscColPtr = (int *)malloc((n + 1) * sizeof(int)); serialCscVal = (double *)malloc(nnz * sizeof(double)); serialTime = performTransposition( serial, m, n, nnz, csrRowPtr, csrColIdx, csrVal, serialCscColPtr, serialCscRowIdx, serialCscVal ); } void nvidiaAlgo1(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal) { int *nvidiaAlgo1CscRowIdx = (int *)malloc(nnz * sizeof(int)); int *nvidiaAlgo1CscColPtr = (int *)malloc((n + 1) * sizeof(int)); double *nvidiaAlgo1CscVal = (double *)malloc(nnz * sizeof(double)); float nvidiaAlgo1Time = performTransposition( nvidia, m, n, nnz, csrRowPtr, csrColIdx, csrVal, nvidiaAlgo1CscColPtr, nvidiaAlgo1CscRowIdx, nvidiaAlgo1CscVal ); if(nvidiaAlgo1Time == -1) { cout << "GPU Sparse Matrix Transpostion ALGO1: memory is too low" << endl; cout << "ALGO1 speedup: -" << endl; } else { cout << setprecision(1) << "ALGO1 speedup: " << serialTime / nvidiaAlgo1Time << "x" << endl; } // Cleaning device cudaDeviceReset(); free(nvidiaAlgo1CscRowIdx); free(nvidiaAlgo1CscColPtr); free(nvidiaAlgo1CscVal); } void nvidiaAlgo2(int m, int n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal) { int *nvidiaAlgo2CscRowIdx = (int *)malloc(nnz * sizeof(int)); int *nvidiaAlgo2CscColPtr = (int *)malloc((n + 1) * sizeof(int)); double *nvidiaAlgo2CscVal = (double *)malloc(nnz * sizeof(double)); float nvidiaAlgo2Time = performTransposition( nvidia2, m, n, nnz, csrRowPtr, csrColIdx, csrVal, nvidiaAlgo2CscColPtr, nvidiaAlgo2CscRowIdx, nvidiaAlgo2CscVal ); if(nvidiaAlgo2Time == -1) { cout << "GPU Sparse Matrix Transpostion ALGO2: memory is too low" << endl; cout << "ALGO2 speedup: -" << endl; } else { cout << setprecision(1) << "ALGO2 speedup: " << serialTime / nvidiaAlgo2Time << "x" << endl; } // Cleaning device cudaDeviceReset(); free(nvidiaAlgo2CscColPtr); free(nvidiaAlgo2CscRowIdx); free(nvidiaAlgo2CscVal); } void scanTrans(int m, int 
n, int nnz, int *csrRowPtr, int *csrColIdx, double *csrVal) { int *cscRowIdx = (int *)malloc(nnz * sizeof(int)); int *cscColPtr = (int *)malloc((n + 1) * sizeof(int)); double *cscVal = (double *)malloc(nnz * sizeof(double)); float scanTransTime = performTransposition( scanTrans, m, n, nnz, csrRowPtr, csrColIdx, csrVal, cscColPtr, cscRowIdx, cscVal ); if(scanTransTime == -1) { cout << "GPU Sparse Matrix Transpostion ScanTrans: memory is too low" << endl; cout << "ScanTrans wrong: -" << endl; cout << "ScanTrans speedup: -" << endl; } else { bool isWrong = checkAllResults( cscRowIdx, cscColPtr, cscVal, n, nnz ); cout << "ScanTrans wrong: " << isWrong << endl; if(isWrong) exit(EXIT_FAILURE); else cout << setprecision(1) << "ScanTrans speedup: " << serialTime / scanTransTime << "x" << endl; } // Cleaning device cudaDeviceReset(); free(cscRowIdx); free(cscColPtr); free(cscVal); } bool checkAllResults( int *scanTransCscRowIdx, int *scanTransCscColPtr, double *scanTransCscVal, int n, int nnz ) { bool isWrong = false; isWrong = checkResultsIsWrong(n + 1, serialCscColPtr, scanTransCscColPtr); if(isWrong) return isWrong; isWrong = checkResultsIsWrong(nnz, serialCscRowIdx, scanTransCscRowIdx); if(isWrong) return isWrong; isWrong = checkResultsIsWrong(nnz, serialCscVal, scanTransCscVal); if(isWrong) return isWrong; } bool checkResultsIsWrong(int m, int *arrayA, int *arrayB) { for (int i = 0; i < m; i++) { if (arrayA[i] != arrayB[i]) { cudaDeviceReset(); return true; } } return false; } bool checkResultsIsWrong(int m, double *arrayA, double *arrayB) { for (int i = 0; i < m; i++) { if (arrayA[i] != arrayB[i]) { cudaDeviceReset(); return true; } } return false; }
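One detail worth flagging in both versions of the pair above: `checkAllResults` returns from inside each `if (isWrong)` branch but falls off the end without a `return` when all three comparisons pass, which is undefined behaviour in C++. One possible fix, keeping the same signature and the globals used above, is sketched here:

bool checkAllResults(int *scanTransCscRowIdx, int *scanTransCscColPtr,
                     double *scanTransCscVal, int n, int nnz)
{
    // Compare the ScanTrans output against the serial reference arrays.
    if (checkResultsIsWrong(n + 1, serialCscColPtr, scanTransCscColPtr)) return true;
    if (checkResultsIsWrong(nnz, serialCscRowIdx, scanTransCscRowIdx))   return true;
    if (checkResultsIsWrong(nnz, serialCscVal, scanTransCscVal))         return true;
    return false;   // all three arrays match
}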
26880e9b71169f083b405d7649615804d4b665ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <chrono> #define BLOCK_SIZE 16 // __global__ void matrixMult(const double *A, const double *B, double *C, int n) { int ai = n * (blockDim.y * blockIdx.y + threadIdx.y); // A int bj = blockDim.x * blockIdx.x + threadIdx.x; // B double sum = 0; for (int k = 0; k < n; k++) sum += A[ai + k] * B[k * n + bj]; int index = n * (blockDim.y * blockIdx.y + threadIdx.y) + blockDim.x * blockIdx.x + threadIdx.x; // C C[index] = sum; } // __global__ void matrixMultShared(double* A, double* B, double* C, int n) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int aBegin = n * BLOCK_SIZE * by; // A, int aEnd = aBegin + n - 1; // A int aStep = BLOCK_SIZE; // , A int bBegin = BLOCK_SIZE * bx; // B, int bStep = BLOCK_SIZE * n; // , B double Csub = 0; for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // // __shared__ __shared__ double As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE]; As[ty][tx] = A[a + n * ty + tx]; Bs[ty][tx] = B[b + n * ty + tx]; __syncthreads(); for (int k = 0; k < BLOCK_SIZE; ++k) Csub += As[ty][k] * Bs[k][tx]; __syncthreads(); } int c = n * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + n * ty + tx] = Csub; } // double * generateRandMatrix(int n, size_t sizeMatrix) { double * matrix = (double *)malloc(sizeMatrix); for (int i = 0; i < n * n; i++) { matrix[i] = (double)rand() / (double)RAND_MAX; } return matrix; } void printMatrix(double * matrix, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { printf("%4.1lf ", matrix[i*n + j]); } printf("\n"); } } // void matrixMultCPU(double* A, double* B, double * C, int n) { for (int i = 0; i<n; i++) { for (int j = 0; j<n; j++) { for (int k = 0; k<n; k++) { C[i*n + j] += A[i*n + k] * B[k*n + j]; } } } } // bool checkMult(double * C1, double * C2, int n) { double accuracy = 1.e-6; for (int i = 0; i < n*n; i++) { if (abs(C1[i] - C2[i]) >= accuracy) return false; } return true; } int main(int argc, char *argv[]) { int N = atoi(argv[1]); int flag_s = atoi(argv[2]); if (N % 16 != 0) { printf("The number is not a multiple of the block size. 
The program will be closed.\n"); system("pause"); exit(1); } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); srand(time(NULL)); size_t sizeMatrix = sizeof(double) * N * N; double * h_A = generateRandMatrix(N, sizeMatrix); double * h_B = generateRandMatrix(N, sizeMatrix); double * h_C = (double *)malloc(sizeMatrix); double * h_C_seq = (double *)malloc(sizeMatrix); for (int i = 0; i<N*N; i++) { h_C_seq[i] = 0; } using namespace std::chrono; high_resolution_clock::time_point t1 = high_resolution_clock::now(); matrixMultCPU(h_A, h_B, h_C_seq, N); high_resolution_clock::time_point t2 = high_resolution_clock::now(); duration<double, std::milli> time_span = t2 - t1; double cpu_time = time_span.count(); printf("The time: %f milliseconds\n", cpu_time); double *d_A; hipMalloc((void **)&d_A, sizeMatrix); double *d_B; hipMalloc((void **)&d_B, sizeMatrix); double * d_C; hipMalloc((void **)&d_C, sizeMatrix); hipMemcpy(d_A, h_A, sizeMatrix, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, sizeMatrix, hipMemcpyHostToDevice); dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE); dim3 blocksPerGrid = dim3(N / BLOCK_SIZE, N / BLOCK_SIZE); if (flag_s) { hipEventRecord(start, 0); hipLaunchKernelGGL(( matrixMultShared), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N); hipEventRecord(stop, 0); hipEventSynchronize(stop); } else { hipEventRecord(start, 0); hipLaunchKernelGGL(( matrixMult), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N); hipEventRecord(stop, 0); hipEventSynchronize(stop); } float KernelTime; hipEventElapsedTime(&KernelTime, start, stop); printf("KernelTime: %f milliseconds\n", KernelTime); double S = cpu_time / KernelTime; printf("Acceleration: %f\n", S); hipMemcpy(h_C, d_C, sizeMatrix, hipMemcpyDeviceToHost); if (checkMult(h_C, h_C_seq, N)) printf("The multiplication results are correct.\n"); else printf("Multiplication results are NOT correct.\n"); hipFree(d_A); hipFree(d_B); hipFree(d_C); free(h_A); free(h_B); free(h_C); free(h_C_seq); return 0; }
26880e9b71169f083b405d7649615804d4b665ee.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <chrono> #define BLOCK_SIZE 16 //функция ядра __global__ void matrixMult(const double *A, const double *B, double *C, int n) { int ai = n * (blockDim.y * blockIdx.y + threadIdx.y); // индекс начала строки матрицы A int bj = blockDim.x * blockIdx.x + threadIdx.x; // индекс начала строки матрицы B double sum = 0; for (int k = 0; k < n; k++) sum += A[ai + k] * B[k * n + bj]; int index = n * (blockDim.y * blockIdx.y + threadIdx.y) + blockDim.x * blockIdx.x + threadIdx.x; // индекс вычисляемого элемента матрицы C C[index] = sum; } //функция ядра с разделяемой памятью __global__ void matrixMultShared(double* A, double* B, double* C, int n) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int aBegin = n * BLOCK_SIZE * by; // индекс первой подматрицы A, обработанной блоком int aEnd = aBegin + n - 1; // индекс пследней подматрицы A int aStep = BLOCK_SIZE; // размер шага, используемый для итерации подматриц A int bBegin = BLOCK_SIZE * bx; // индекс первой подматрицы B, обработанной блоком int bStep = BLOCK_SIZE * n; // размер шага, используемый для итерации подматриц B double Csub = 0; for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // объявление массивов для хранения подматриц в разделяемой памяти // с помощью модификатора __shared__ __shared__ double As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE]; As[ty][tx] = A[a + n * ty + tx]; Bs[ty][tx] = B[b + n * ty + tx]; __syncthreads(); for (int k = 0; k < BLOCK_SIZE; ++k) Csub += As[ty][k] * Bs[k][tx]; __syncthreads(); } int c = n * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + n * ty + tx] = Csub; } // генерация матриц double * generateRandMatrix(int n, size_t sizeMatrix) { double * matrix = (double *)malloc(sizeMatrix); for (int i = 0; i < n * n; i++) { matrix[i] = (double)rand() / (double)RAND_MAX; } return matrix; } void printMatrix(double * matrix, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { printf("%4.1lf ", matrix[i*n + j]); } printf("\n"); } } // функция для последовательного варианта умножения матриц void matrixMultCPU(double* A, double* B, double * C, int n) { for (int i = 0; i<n; i++) { for (int j = 0; j<n; j++) { for (int k = 0; k<n; k++) { C[i*n + j] += A[i*n + k] * B[k*n + j]; } } } } // проверка результатов умножения bool checkMult(double * C1, double * C2, int n) { double accuracy = 1.e-6; for (int i = 0; i < n*n; i++) { if (abs(C1[i] - C2[i]) >= accuracy) return false; } return true; } int main(int argc, char *argv[]) { int N = atoi(argv[1]); int flag_s = atoi(argv[2]); if (N % 16 != 0) { printf("The number is not a multiple of the block size. 
The program will be closed.\n"); system("pause"); exit(1); } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); srand(time(NULL)); size_t sizeMatrix = sizeof(double) * N * N; double * h_A = generateRandMatrix(N, sizeMatrix); double * h_B = generateRandMatrix(N, sizeMatrix); double * h_C = (double *)malloc(sizeMatrix); double * h_C_seq = (double *)malloc(sizeMatrix); for (int i = 0; i<N*N; i++) { h_C_seq[i] = 0; } using namespace std::chrono; high_resolution_clock::time_point t1 = high_resolution_clock::now(); matrixMultCPU(h_A, h_B, h_C_seq, N); high_resolution_clock::time_point t2 = high_resolution_clock::now(); duration<double, std::milli> time_span = t2 - t1; double cpu_time = time_span.count(); printf("The time: %f milliseconds\n", cpu_time); double *d_A; cudaMalloc((void **)&d_A, sizeMatrix); double *d_B; cudaMalloc((void **)&d_B, sizeMatrix); double * d_C; cudaMalloc((void **)&d_C, sizeMatrix); cudaMemcpy(d_A, h_A, sizeMatrix, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, sizeMatrix, cudaMemcpyHostToDevice); dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE); dim3 blocksPerGrid = dim3(N / BLOCK_SIZE, N / BLOCK_SIZE); if (flag_s) { cudaEventRecord(start, 0); matrixMultShared<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); } else { cudaEventRecord(start, 0); matrixMult<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); } float KernelTime; cudaEventElapsedTime(&KernelTime, start, stop); printf("KernelTime: %f milliseconds\n", KernelTime); double S = cpu_time / KernelTime; printf("Acceleration: %f\n", S); cudaMemcpy(h_C, d_C, sizeMatrix, cudaMemcpyDeviceToHost); if (checkMult(h_C, h_C_seq, N)) printf("The multiplication results are correct.\n"); else printf("Multiplication results are NOT correct.\n"); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(h_C); free(h_C_seq); return 0; }
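Besides the API renames, this pair shows how the kernel-launch syntax itself is converted: the `kernel<<<grid, block>>>(args)` chevrons of the .cu file become `hipLaunchKernelGGL` calls in the .hip file, with the same semantics. The two forms of the shared-memory launch from the timing section above are equivalent; the sketch below uses only names that appear in this pair.

// CUDA form, as in the .cu file above:
matrixMultShared<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);

// HIP form emitted by hipify, as in the .hip file above; the two extra 0 arguments are the
// dynamic shared-memory size in bytes and the stream (0 = default stream):
hipLaunchKernelGGL((matrixMultShared), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
                   d_A, d_B, d_C, N);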
c5faff787320b412941881b70a4f31786f5c59ff.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>

__global__ void DetectorPowerDetectKernel(float* field, float* intensity, int M_Fields, int M_Camera)
{
    int indx = threadIdx.x + blockIdx.x * blockDim.x;
    int indy = threadIdx.y + blockIdx.y * blockDim.y;
    int offsetView = indx + M_Camera * indy;
    int offset = (indx + 16) + M_Fields * (indy + 16);
    intensity[offsetView] = field[offset] * field[offset];
}

__global__ void DetectorFieldDetectKernel(float* field, float* intensity, int M_Fields, int M_Camera)
{
    int indx = threadIdx.x + blockIdx.x * blockDim.x;
    int indy = threadIdx.y + blockIdx.y * blockDim.y;
    int offsetView = indx + M_Camera * indy;
    int offset = (indx + 16) + M_Fields * (indy + 16);
    intensity[offsetView] = field[offset];
}

__global__ void DetectorPowerDrawPixelKernel(uchar4* ptr, float* intensity, float contrust)
{
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (1) {
        int offset = x + y * blockDim.x * gridDim.x;
        uchar4 color = { 0, 255, 0, 255 };
        float greenf = intensity[offset] / contrust;
        if (greenf < 1.0f)
            color.y = (unsigned char)roundf(255.0f * greenf);
        ptr[offset] = color;
    }
}

__global__ void DetectorFieldDrawPixelKernel(uchar4* ptr, float* intensity, float contrust)
{
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    float k = intensity[offset];
    if (k >= 0) {
        uchar4 color = { 255, 0, 0, 255 };
        k = k * k / contrust;
        if (k < 1.0f)
            color.x = (unsigned char)roundf(255.0f * k);
        ptr[offset] = color;
    }
    else {
        uchar4 color = { 0, 0, 255, 255 };
        k = k * k / contrust;
        if (k < 1.0f)
            color.z = (unsigned char)roundf(255.0f * k);
        ptr[offset] = color;
    }
}

void DetectorDetectInterface(float* field, float* intensity, int M_field, int M_camera, dim3 grids, dim3 threads, bool ispower)
{
    if (ispower)
        DetectorPowerDetectKernel<<<grids, threads>>>(field, intensity, M_field, M_camera);
    else
        DetectorFieldDetectKernel<<<grids, threads>>>(field, intensity, M_field, M_camera);
}

void DetectorDrawPixelInterface(uchar4* ptr, float* intensity, float contrust, dim3 grids, dim3 threads, bool ispower)
{
    if (ispower)
        DetectorPowerDrawPixelKernel<<<grids, threads>>>(ptr, intensity, contrust);
    else
        hipLaunchKernelGGL((DetectorFieldDrawPixelKernel), dim3(grids), dim3(threads), 0, 0, ptr, intensity, contrust);
}
c5faff787320b412941881b70a4f31786f5c59ff.cu
#include <cuda_runtime.h>
#include <helper_cuda.h>

__global__ void DetectorPowerDetectKernel(float* field, float* intensity, int M_Fields, int M_Camera)
{
    int indx = threadIdx.x + blockIdx.x * blockDim.x;
    int indy = threadIdx.y + blockIdx.y * blockDim.y;
    int offsetView = indx + M_Camera * indy;
    int offset = (indx + 16) + M_Fields * (indy + 16);
    intensity[offsetView] = field[offset] * field[offset];
}

__global__ void DetectorFieldDetectKernel(float* field, float* intensity, int M_Fields, int M_Camera)
{
    int indx = threadIdx.x + blockIdx.x * blockDim.x;
    int indy = threadIdx.y + blockIdx.y * blockDim.y;
    int offsetView = indx + M_Camera * indy;
    int offset = (indx + 16) + M_Fields * (indy + 16);
    intensity[offsetView] = field[offset];
}

__global__ void DetectorPowerDrawPixelKernel(uchar4* ptr, float* intensity, float contrust)
{
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (1) {
        int offset = x + y * blockDim.x * gridDim.x;
        uchar4 color = { 0, 255, 0, 255 };
        float greenf = intensity[offset] / contrust;
        if (greenf < 1.0f)
            color.y = (unsigned char)roundf(255.0f * greenf);
        ptr[offset] = color;
    }
}

__global__ void DetectorFieldDrawPixelKernel(uchar4* ptr, float* intensity, float contrust)
{
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    float k = intensity[offset];
    if (k >= 0) {
        uchar4 color = { 255, 0, 0, 255 };
        k = k * k / contrust;
        if (k < 1.0f)
            color.x = (unsigned char)roundf(255.0f * k);
        ptr[offset] = color;
    }
    else {
        uchar4 color = { 0, 0, 255, 255 };
        k = k * k / contrust;
        if (k < 1.0f)
            color.z = (unsigned char)roundf(255.0f * k);
        ptr[offset] = color;
    }
}

void DetectorDetectInterface(float* field, float* intensity, int M_field, int M_camera, dim3 grids, dim3 threads, bool ispower)
{
    if (ispower)
        DetectorPowerDetectKernel<<<grids, threads>>>(field, intensity, M_field, M_camera);
    else
        DetectorFieldDetectKernel<<<grids, threads>>>(field, intensity, M_field, M_camera);
}

void DetectorDrawPixelInterface(uchar4* ptr, float* intensity, float contrust, dim3 grids, dim3 threads, bool ispower)
{
    if (ispower)
        DetectorPowerDrawPixelKernel<<<grids, threads>>>(ptr, intensity, contrust);
    else
        DetectorFieldDrawPixelKernel<<<grids, threads>>>(ptr, intensity, contrust);
}
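The two *Interface wrappers above are the only host-callable entry points in this translation unit; the caller supplies both the device buffers and the launch geometry. The following is a hypothetical usage sketch: the 16x16 block shape, the buffer names, and the assumption that the detector view is M_camera pixels on a side are mine, not from the file. Because the draw kernels index with x + y * blockDim.x * gridDim.x and have no bounds check, the grid times the block must match the intensity and pixel buffers exactly.

// Hypothetical driver code (names and sizes are assumptions, not from this file).
// d_field, d_intensity, d_pixels are device buffers already allocated by the caller.
dim3 threads(16, 16);
dim3 grids(M_camera / threads.x, M_camera / threads.y);   // assumes M_camera is a multiple of 16
DetectorDetectInterface(d_field, d_intensity, M_field, M_camera, grids, threads, /*ispower=*/true);
DetectorDrawPixelInterface(d_pixels, d_intensity, /*contrust=*/1.0f, grids, threads, /*ispower=*/true);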
9bf7f1e65bfe216fb90fbcdbaf2173a109449bda.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "icpcuda/Cuda/internal.h" #include "icpcuda/Cuda/vector_math.hpp" #include "icpcuda/Cuda/containers/safe_call.hpp" __global__ void pyrDownGaussKernel (const PtrStepSz<unsigned short> src, PtrStepSz<unsigned short> dst, float sigma_color) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= dst.cols || y >= dst.rows) return; const int D = 5; int center = src.ptr (2 * y)[2 * x]; int x_mi = max(0, 2*x - D/2) - 2*x; int y_mi = max(0, 2*y - D/2) - 2*y; int x_ma = min(src.cols, 2*x -D/2+D) - 2*x; int y_ma = min(src.rows, 2*y -D/2+D) - 2*y; float sum = 0; float wall = 0; float weights[] = {0.375f, 0.25f, 0.0625f} ; for(int yi = y_mi; yi < y_ma; ++yi) for(int xi = x_mi; xi < x_ma; ++xi) { int val = src.ptr (2*y + yi)[2*x + xi]; if (abs (val - center) < 3 * sigma_color) { sum += val * weights[abs(xi)] * weights[abs(yi)]; wall += weights[abs(xi)] * weights[abs(yi)]; } } dst.ptr (y)[x] = static_cast<int>(sum /wall); } void pyrDown(const DeviceArray2D<unsigned short> & src, DeviceArray2D<unsigned short> & dst) { dst.create (src.rows () / 2, src.cols () / 2); dim3 block (32, 8); dim3 grid (divUp (dst.cols (), block.x), divUp (dst.rows (), block.y)); const float sigma_color = 30; hipLaunchKernelGGL(( pyrDownGaussKernel), dim3(grid), dim3(block), 0, 0, src, dst, sigma_color); cudaSafeCall ( hipGetLastError () ); }; __global__ void computeVmapKernel(const PtrStepSz<unsigned short> depth, PtrStep<float> vmap, float fx_inv, float fy_inv, float cx, float cy, float depthCutoff) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if(u < depth.cols && v < depth.rows) { float z = depth.ptr (v)[u] / 1000.f; // load and convert: mm -> meters if(z != 0 && z < depthCutoff) { float vx = z * (u - cx) * fx_inv; float 
vy = z * (v - cy) * fy_inv; float vz = z; vmap.ptr (v )[u] = vx; vmap.ptr (v + depth.rows )[u] = vy; vmap.ptr (v + depth.rows * 2)[u] = vz; } else { vmap.ptr (v)[u] = __int_as_float(0x7fffffff); /*CUDART_NAN_F*/ } } } void createVMap(const Intr& intr, const DeviceArray2D<unsigned short> & depth, DeviceArray2D<float> & vmap, const float depthCutoff) { vmap.create (depth.rows () * 3, depth.cols ()); dim3 block (32, 8); dim3 grid (1, 1, 1); grid.x = divUp (depth.cols (), block.x); grid.y = divUp (depth.rows (), block.y); float fx = intr.fx, cx = intr.cx; float fy = intr.fy, cy = intr.cy; hipLaunchKernelGGL(( computeVmapKernel), dim3(grid), dim3(block), 0, 0, depth, vmap, 1.f / fx, 1.f / fy, cx, cy, depthCutoff); cudaSafeCall (hipGetLastError ()); } __global__ void computeNmapKernel(int rows, int cols, const PtrStep<float> vmap, PtrStep<float> nmap) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if (u >= cols || v >= rows) return; if (u == cols - 1 || v == rows - 1) { nmap.ptr (v)[u] = __int_as_float(0x7fffffff); /*CUDART_NAN_F*/ return; } float3 v00, v01, v10; v00.x = vmap.ptr (v )[u]; v01.x = vmap.ptr (v )[u + 1]; v10.x = vmap.ptr (v + 1)[u]; if (!isnan (v00.x) && !isnan (v01.x) && !isnan (v10.x)) { v00.y = vmap.ptr (v + rows)[u]; v01.y = vmap.ptr (v + rows)[u + 1]; v10.y = vmap.ptr (v + 1 + rows)[u]; v00.z = vmap.ptr (v + 2 * rows)[u]; v01.z = vmap.ptr (v + 2 * rows)[u + 1]; v10.z = vmap.ptr (v + 1 + 2 * rows)[u]; float3 r = normalized (cross (v01 - v00, v10 - v00)); nmap.ptr (v )[u] = r.x; nmap.ptr (v + rows)[u] = r.y; nmap.ptr (v + 2 * rows)[u] = r.z; } else nmap.ptr (v)[u] = __int_as_float(0x7fffffff); /*CUDART_NAN_F*/ } void createNMap(const DeviceArray2D<float>& vmap, DeviceArray2D<float>& nmap) { nmap.create (vmap.rows (), vmap.cols ()); int rows = vmap.rows () / 3; int cols = vmap.cols (); dim3 block (32, 8); dim3 grid (1, 1, 1); grid.x = divUp (cols, block.x); grid.y = divUp (rows, block.y); hipLaunchKernelGGL(( computeNmapKernel), dim3(grid), dim3(block), 0, 0, rows, cols, vmap, nmap); cudaSafeCall (hipGetLastError ()); } __global__ void tranformMapsKernel(int rows, int cols, const PtrStep<float> vmap_src, const PtrStep<float> nmap_src, const Mat33 Rmat, const float3 tvec, PtrStepSz<float> vmap_dst, PtrStep<float> nmap_dst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < cols && y < rows) { //vertexes float3 vsrc, vdst = make_float3 (__int_as_float(0x7fffffff), __int_as_float(0x7fffffff), __int_as_float(0x7fffffff)); vsrc.x = vmap_src.ptr (y)[x]; if (!isnan (vsrc.x)) { vsrc.y = vmap_src.ptr (y + rows)[x]; vsrc.z = vmap_src.ptr (y + 2 * rows)[x]; vdst = Rmat * vsrc + tvec; vmap_dst.ptr (y + rows)[x] = vdst.y; vmap_dst.ptr (y + 2 * rows)[x] = vdst.z; } vmap_dst.ptr (y)[x] = vdst.x; //normals float3 nsrc, ndst = make_float3 (__int_as_float(0x7fffffff), __int_as_float(0x7fffffff), __int_as_float(0x7fffffff)); nsrc.x = nmap_src.ptr (y)[x]; if (!isnan (nsrc.x)) { nsrc.y = nmap_src.ptr (y + rows)[x]; nsrc.z = nmap_src.ptr (y + 2 * rows)[x]; ndst = Rmat * nsrc; nmap_dst.ptr (y + rows)[x] = ndst.y; nmap_dst.ptr (y + 2 * rows)[x] = ndst.z; } nmap_dst.ptr (y)[x] = ndst.x; } } void tranformMaps(const DeviceArray2D<float>& vmap_src, const DeviceArray2D<float>& nmap_src, const Mat33& Rmat, const float3& tvec, DeviceArray2D<float>& vmap_dst, DeviceArray2D<float>& nmap_dst) { int cols = vmap_src.cols(); int rows = vmap_src.rows() / 3; vmap_dst.create(rows * 3, cols); nmap_dst.create(rows * 3, 
cols); dim3 block(32, 8); dim3 grid(1, 1, 1); grid.x = divUp(cols, block.x); grid.y = divUp(rows, block.y); hipLaunchKernelGGL(( tranformMapsKernel), dim3(grid), dim3(block), 0, 0, rows, cols, vmap_src, nmap_src, Rmat, tvec, vmap_dst, nmap_dst); cudaSafeCall(hipGetLastError()); } __global__ void copyMapsKernel(int rows, int cols, const float * vmap_src, const float * nmap_src, PtrStepSz<float> vmap_dst, PtrStep<float> nmap_dst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < cols && y < rows) { //vertexes float3 vsrc, vdst = make_float3 (__int_as_float(0x7fffffff), __int_as_float(0x7fffffff), __int_as_float(0x7fffffff)); vsrc.x = vmap_src[y * cols * 4 + (x * 4) + 0]; vsrc.y = vmap_src[y * cols * 4 + (x * 4) + 1]; vsrc.z = vmap_src[y * cols * 4 + (x * 4) + 2]; if(!(vsrc.z == 0)) { vdst = vsrc; } vmap_dst.ptr (y)[x] = vdst.x; vmap_dst.ptr (y + rows)[x] = vdst.y; vmap_dst.ptr (y + 2 * rows)[x] = vdst.z; //normals float3 nsrc, ndst = make_float3 (__int_as_float(0x7fffffff), __int_as_float(0x7fffffff), __int_as_float(0x7fffffff)); nsrc.x = nmap_src[y * cols * 4 + (x * 4) + 0]; nsrc.y = nmap_src[y * cols * 4 + (x * 4) + 1]; nsrc.z = nmap_src[y * cols * 4 + (x * 4) + 2]; if(!(vsrc.z == 0)) { ndst = nsrc; } nmap_dst.ptr (y)[x] = ndst.x; nmap_dst.ptr (y + rows)[x] = ndst.y; nmap_dst.ptr (y + 2 * rows)[x] = ndst.z; } } void copyMaps(const DeviceArray<float>& vmap_src, const DeviceArray<float>& nmap_src, DeviceArray2D<float>& vmap_dst, DeviceArray2D<float>& nmap_dst) { int cols = vmap_dst.cols(); int rows = vmap_dst.rows() / 3; vmap_dst.create(rows * 3, cols); nmap_dst.create(rows * 3, cols); dim3 block(32, 8); dim3 grid(1, 1, 1); grid.x = divUp(cols, block.x); grid.y = divUp(rows, block.y); hipLaunchKernelGGL(( copyMapsKernel), dim3(grid), dim3(block), 0, 0, rows, cols, vmap_src, nmap_src, vmap_dst, nmap_dst); cudaSafeCall(hipGetLastError()); } __global__ void pyrDownKernelGaussF(const PtrStepSz<float> src, PtrStepSz<float> dst, float * gaussKernel) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= dst.cols || y >= dst.rows) return; const int D = 5; float center = src.ptr (2 * y)[2 * x]; int tx = min (2 * x - D / 2 + D, src.cols - 1); int ty = min (2 * y - D / 2 + D, src.rows - 1); int cy = max (0, 2 * y - D / 2); float sum = 0; int count = 0; for (; cy < ty; ++cy) { for (int cx = max (0, 2 * x - D / 2); cx < tx; ++cx) { if(!isnan(src.ptr (cy)[cx])) { sum += src.ptr (cy)[cx] * gaussKernel[(ty - cy - 1) * 5 + (tx - cx - 1)]; count += gaussKernel[(ty - cy - 1) * 5 + (tx - cx - 1)]; } } } dst.ptr (y)[x] = (float)(sum / (float)count); } template<bool normalize> __global__ void resizeMapKernel(int drows, int dcols, int srows, const PtrStep<float> input, PtrStep<float> output) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= dcols || y >= drows) return; const float qnan = __int_as_float(0x7fffffff); int xs = x * 2; int ys = y * 2; float x00 = input.ptr (ys + 0)[xs + 0]; float x01 = input.ptr (ys + 0)[xs + 1]; float x10 = input.ptr (ys + 1)[xs + 0]; float x11 = input.ptr (ys + 1)[xs + 1]; if (isnan (x00) || isnan (x01) || isnan (x10) || isnan (x11)) { output.ptr (y)[x] = qnan; return; } else { float3 n; n.x = (x00 + x01 + x10 + x11) / 4; float y00 = input.ptr (ys + srows + 0)[xs + 0]; float y01 = input.ptr (ys + srows + 0)[xs + 1]; float y10 = input.ptr (ys + srows + 1)[xs + 0]; float y11 = input.ptr (ys + srows + 1)[xs + 1]; n.y = (y00 + y01 
+ y10 + y11) / 4; float z00 = input.ptr (ys + 2 * srows + 0)[xs + 0]; float z01 = input.ptr (ys + 2 * srows + 0)[xs + 1]; float z10 = input.ptr (ys + 2 * srows + 1)[xs + 0]; float z11 = input.ptr (ys + 2 * srows + 1)[xs + 1]; n.z = (z00 + z01 + z10 + z11) / 4; if (normalize) n = normalized (n); output.ptr (y )[x] = n.x; output.ptr (y + drows)[x] = n.y; output.ptr (y + 2 * drows)[x] = n.z; } } template<bool normalize> void resizeMap(const DeviceArray2D<float>& input, DeviceArray2D<float>& output) { int in_cols = input.cols (); int in_rows = input.rows () / 3; int out_cols = in_cols / 2; int out_rows = in_rows / 2; output.create (out_rows * 3, out_cols); dim3 block (32, 8); dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y)); resizeMapKernel<normalize><< < grid, block>>>(out_rows, out_cols, in_rows, input, output); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } void resizeVMap(const DeviceArray2D<float>& input, DeviceArray2D<float>& output) { resizeMap<false>(input, output); } void resizeNMap(const DeviceArray2D<float>& input, DeviceArray2D<float>& output) { resizeMap<true>(input, output); }
9bf7f1e65bfe216fb90fbcdbaf2173a109449bda.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "icpcuda/Cuda/internal.h" #include "icpcuda/Cuda/vector_math.hpp" #include "icpcuda/Cuda/containers/safe_call.hpp" __global__ void pyrDownGaussKernel (const PtrStepSz<unsigned short> src, PtrStepSz<unsigned short> dst, float sigma_color) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= dst.cols || y >= dst.rows) return; const int D = 5; int center = src.ptr (2 * y)[2 * x]; int x_mi = max(0, 2*x - D/2) - 2*x; int y_mi = max(0, 2*y - D/2) - 2*y; int x_ma = min(src.cols, 2*x -D/2+D) - 2*x; int y_ma = min(src.rows, 2*y -D/2+D) - 2*y; float sum = 0; float wall = 0; float weights[] = {0.375f, 0.25f, 0.0625f} ; for(int yi = y_mi; yi < y_ma; ++yi) for(int xi = x_mi; xi < x_ma; ++xi) { int val = src.ptr (2*y + yi)[2*x + xi]; if (abs (val - center) < 3 * sigma_color) { sum += val * weights[abs(xi)] * weights[abs(yi)]; wall += weights[abs(xi)] * weights[abs(yi)]; } } dst.ptr (y)[x] = static_cast<int>(sum /wall); } void pyrDown(const DeviceArray2D<unsigned short> & src, DeviceArray2D<unsigned short> & dst) { dst.create (src.rows () / 2, src.cols () / 2); dim3 block (32, 8); dim3 grid (divUp (dst.cols (), block.x), divUp (dst.rows (), block.y)); const float sigma_color = 30; pyrDownGaussKernel<<<grid, block>>>(src, dst, sigma_color); cudaSafeCall ( cudaGetLastError () ); }; __global__ void computeVmapKernel(const PtrStepSz<unsigned short> depth, PtrStep<float> vmap, float fx_inv, float fy_inv, float cx, float cy, float depthCutoff) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if(u < depth.cols && v < depth.rows) { float z = depth.ptr (v)[u] / 1000.f; // load and convert: mm -> meters if(z != 0 && z < depthCutoff) { float vx = z * (u - cx) * fx_inv; float vy = z * (v - cy) * fy_inv; float vz = z; vmap.ptr (v )[u] = vx; vmap.ptr (v + depth.rows )[u] = vy; vmap.ptr (v + depth.rows 
* 2)[u] = vz; } else { vmap.ptr (v)[u] = __int_as_float(0x7fffffff); /*CUDART_NAN_F*/ } } } void createVMap(const Intr& intr, const DeviceArray2D<unsigned short> & depth, DeviceArray2D<float> & vmap, const float depthCutoff) { vmap.create (depth.rows () * 3, depth.cols ()); dim3 block (32, 8); dim3 grid (1, 1, 1); grid.x = divUp (depth.cols (), block.x); grid.y = divUp (depth.rows (), block.y); float fx = intr.fx, cx = intr.cx; float fy = intr.fy, cy = intr.cy; computeVmapKernel<<<grid, block>>>(depth, vmap, 1.f / fx, 1.f / fy, cx, cy, depthCutoff); cudaSafeCall (cudaGetLastError ()); } __global__ void computeNmapKernel(int rows, int cols, const PtrStep<float> vmap, PtrStep<float> nmap) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if (u >= cols || v >= rows) return; if (u == cols - 1 || v == rows - 1) { nmap.ptr (v)[u] = __int_as_float(0x7fffffff); /*CUDART_NAN_F*/ return; } float3 v00, v01, v10; v00.x = vmap.ptr (v )[u]; v01.x = vmap.ptr (v )[u + 1]; v10.x = vmap.ptr (v + 1)[u]; if (!isnan (v00.x) && !isnan (v01.x) && !isnan (v10.x)) { v00.y = vmap.ptr (v + rows)[u]; v01.y = vmap.ptr (v + rows)[u + 1]; v10.y = vmap.ptr (v + 1 + rows)[u]; v00.z = vmap.ptr (v + 2 * rows)[u]; v01.z = vmap.ptr (v + 2 * rows)[u + 1]; v10.z = vmap.ptr (v + 1 + 2 * rows)[u]; float3 r = normalized (cross (v01 - v00, v10 - v00)); nmap.ptr (v )[u] = r.x; nmap.ptr (v + rows)[u] = r.y; nmap.ptr (v + 2 * rows)[u] = r.z; } else nmap.ptr (v)[u] = __int_as_float(0x7fffffff); /*CUDART_NAN_F*/ } void createNMap(const DeviceArray2D<float>& vmap, DeviceArray2D<float>& nmap) { nmap.create (vmap.rows (), vmap.cols ()); int rows = vmap.rows () / 3; int cols = vmap.cols (); dim3 block (32, 8); dim3 grid (1, 1, 1); grid.x = divUp (cols, block.x); grid.y = divUp (rows, block.y); computeNmapKernel<<<grid, block>>>(rows, cols, vmap, nmap); cudaSafeCall (cudaGetLastError ()); } __global__ void tranformMapsKernel(int rows, int cols, const PtrStep<float> vmap_src, const PtrStep<float> nmap_src, const Mat33 Rmat, const float3 tvec, PtrStepSz<float> vmap_dst, PtrStep<float> nmap_dst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < cols && y < rows) { //vertexes float3 vsrc, vdst = make_float3 (__int_as_float(0x7fffffff), __int_as_float(0x7fffffff), __int_as_float(0x7fffffff)); vsrc.x = vmap_src.ptr (y)[x]; if (!isnan (vsrc.x)) { vsrc.y = vmap_src.ptr (y + rows)[x]; vsrc.z = vmap_src.ptr (y + 2 * rows)[x]; vdst = Rmat * vsrc + tvec; vmap_dst.ptr (y + rows)[x] = vdst.y; vmap_dst.ptr (y + 2 * rows)[x] = vdst.z; } vmap_dst.ptr (y)[x] = vdst.x; //normals float3 nsrc, ndst = make_float3 (__int_as_float(0x7fffffff), __int_as_float(0x7fffffff), __int_as_float(0x7fffffff)); nsrc.x = nmap_src.ptr (y)[x]; if (!isnan (nsrc.x)) { nsrc.y = nmap_src.ptr (y + rows)[x]; nsrc.z = nmap_src.ptr (y + 2 * rows)[x]; ndst = Rmat * nsrc; nmap_dst.ptr (y + rows)[x] = ndst.y; nmap_dst.ptr (y + 2 * rows)[x] = ndst.z; } nmap_dst.ptr (y)[x] = ndst.x; } } void tranformMaps(const DeviceArray2D<float>& vmap_src, const DeviceArray2D<float>& nmap_src, const Mat33& Rmat, const float3& tvec, DeviceArray2D<float>& vmap_dst, DeviceArray2D<float>& nmap_dst) { int cols = vmap_src.cols(); int rows = vmap_src.rows() / 3; vmap_dst.create(rows * 3, cols); nmap_dst.create(rows * 3, cols); dim3 block(32, 8); dim3 grid(1, 1, 1); grid.x = divUp(cols, block.x); grid.y = divUp(rows, block.y); tranformMapsKernel<<<grid, block>>>(rows, cols, vmap_src, nmap_src, Rmat, tvec, vmap_dst, 
nmap_dst); cudaSafeCall(cudaGetLastError()); } __global__ void copyMapsKernel(int rows, int cols, const float * vmap_src, const float * nmap_src, PtrStepSz<float> vmap_dst, PtrStep<float> nmap_dst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < cols && y < rows) { //vertexes float3 vsrc, vdst = make_float3 (__int_as_float(0x7fffffff), __int_as_float(0x7fffffff), __int_as_float(0x7fffffff)); vsrc.x = vmap_src[y * cols * 4 + (x * 4) + 0]; vsrc.y = vmap_src[y * cols * 4 + (x * 4) + 1]; vsrc.z = vmap_src[y * cols * 4 + (x * 4) + 2]; if(!(vsrc.z == 0)) { vdst = vsrc; } vmap_dst.ptr (y)[x] = vdst.x; vmap_dst.ptr (y + rows)[x] = vdst.y; vmap_dst.ptr (y + 2 * rows)[x] = vdst.z; //normals float3 nsrc, ndst = make_float3 (__int_as_float(0x7fffffff), __int_as_float(0x7fffffff), __int_as_float(0x7fffffff)); nsrc.x = nmap_src[y * cols * 4 + (x * 4) + 0]; nsrc.y = nmap_src[y * cols * 4 + (x * 4) + 1]; nsrc.z = nmap_src[y * cols * 4 + (x * 4) + 2]; if(!(vsrc.z == 0)) { ndst = nsrc; } nmap_dst.ptr (y)[x] = ndst.x; nmap_dst.ptr (y + rows)[x] = ndst.y; nmap_dst.ptr (y + 2 * rows)[x] = ndst.z; } } void copyMaps(const DeviceArray<float>& vmap_src, const DeviceArray<float>& nmap_src, DeviceArray2D<float>& vmap_dst, DeviceArray2D<float>& nmap_dst) { int cols = vmap_dst.cols(); int rows = vmap_dst.rows() / 3; vmap_dst.create(rows * 3, cols); nmap_dst.create(rows * 3, cols); dim3 block(32, 8); dim3 grid(1, 1, 1); grid.x = divUp(cols, block.x); grid.y = divUp(rows, block.y); copyMapsKernel<<<grid, block>>>(rows, cols, vmap_src, nmap_src, vmap_dst, nmap_dst); cudaSafeCall(cudaGetLastError()); } __global__ void pyrDownKernelGaussF(const PtrStepSz<float> src, PtrStepSz<float> dst, float * gaussKernel) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= dst.cols || y >= dst.rows) return; const int D = 5; float center = src.ptr (2 * y)[2 * x]; int tx = min (2 * x - D / 2 + D, src.cols - 1); int ty = min (2 * y - D / 2 + D, src.rows - 1); int cy = max (0, 2 * y - D / 2); float sum = 0; int count = 0; for (; cy < ty; ++cy) { for (int cx = max (0, 2 * x - D / 2); cx < tx; ++cx) { if(!isnan(src.ptr (cy)[cx])) { sum += src.ptr (cy)[cx] * gaussKernel[(ty - cy - 1) * 5 + (tx - cx - 1)]; count += gaussKernel[(ty - cy - 1) * 5 + (tx - cx - 1)]; } } } dst.ptr (y)[x] = (float)(sum / (float)count); } template<bool normalize> __global__ void resizeMapKernel(int drows, int dcols, int srows, const PtrStep<float> input, PtrStep<float> output) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= dcols || y >= drows) return; const float qnan = __int_as_float(0x7fffffff); int xs = x * 2; int ys = y * 2; float x00 = input.ptr (ys + 0)[xs + 0]; float x01 = input.ptr (ys + 0)[xs + 1]; float x10 = input.ptr (ys + 1)[xs + 0]; float x11 = input.ptr (ys + 1)[xs + 1]; if (isnan (x00) || isnan (x01) || isnan (x10) || isnan (x11)) { output.ptr (y)[x] = qnan; return; } else { float3 n; n.x = (x00 + x01 + x10 + x11) / 4; float y00 = input.ptr (ys + srows + 0)[xs + 0]; float y01 = input.ptr (ys + srows + 0)[xs + 1]; float y10 = input.ptr (ys + srows + 1)[xs + 0]; float y11 = input.ptr (ys + srows + 1)[xs + 1]; n.y = (y00 + y01 + y10 + y11) / 4; float z00 = input.ptr (ys + 2 * srows + 0)[xs + 0]; float z01 = input.ptr (ys + 2 * srows + 0)[xs + 1]; float z10 = input.ptr (ys + 2 * srows + 1)[xs + 0]; float z11 = input.ptr (ys + 2 * srows + 1)[xs + 1]; n.z = (z00 + z01 + z10 + z11) / 4; if 
(normalize) n = normalized (n); output.ptr (y )[x] = n.x; output.ptr (y + drows)[x] = n.y; output.ptr (y + 2 * drows)[x] = n.z; } } template<bool normalize> void resizeMap(const DeviceArray2D<float>& input, DeviceArray2D<float>& output) { int in_cols = input.cols (); int in_rows = input.rows () / 3; int out_cols = in_cols / 2; int out_rows = in_rows / 2; output.create (out_rows * 3, out_cols); dim3 block (32, 8); dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y)); resizeMapKernel<normalize><< < grid, block>>>(out_rows, out_cols, in_rows, input, output); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } void resizeVMap(const DeviceArray2D<float>& input, DeviceArray2D<float>& output) { resizeMap<false>(input, output); } void resizeNMap(const DeviceArray2D<float>& input, DeviceArray2D<float>& output) { resizeMap<true>(input, output); }
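Throughout the pair above, invalid vertex and normal entries are marked with __int_as_float(0x7fffffff), the quiet-NaN bit pattern the comments label CUDART_NAN_F, and consumers screen them out with isnan. A minimal, self-contained sketch of that sentinel convention follows; the kernel and helper names here are illustrative, not from the file.

// Illustrative only: the same quiet-NaN sentinel convention as the vmap/nmap kernels above.
__device__ __forceinline__ float invalidValue()
{
    return __int_as_float(0x7fffffff);   // quiet NaN (CUDART_NAN_F bit pattern)
}

__global__ void copyValidOnly(const float *src, float *dst, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    dst[i] = isnan(src[i]) ? invalidValue()   // propagate the sentinel for invalid entries
                           : src[i];
}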
d15730842d65b81d14520b7153fdbb0638413d20.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <iostream> #include <ostream> #include <fstream> #include <sys/time.h> #include <time.h> using namespace std; #define CASENAME "test" #define NUMGPU 1 #define BLOCKSIZEX 32 #define BLOCKSIZEY 1 #define BLOCKSIZEZ 1 #define BLOCKSIZELRX 64 #define BLOCKSIZELRY 1 #define BLOCKSIZELRZ 1 #define BLOCKSIZEINTERP 8 #define XDIM 32 #define YDIM 32 #define ZDIM 32 #define TMAX 5000 #define STARTF 0 #define OBSTR1 16.f #define OBSTX1 96.5f #define OBSTY1 127.5f #define OBSTZ1 15.5f #define OBSTR2 16.f #define OBSTX2 224.5f #define OBSTY2 127.5f #define OBSTZ2 31.5f #define LRFACTOR 0.5f #define LRLEVEL 2 #define LRX0 80.25f //minimum x coord of LR #define XLRDIM 128 //number of nodes in x #define LRY0 48.25f #define YLRDIM 128 #define LRZ0 -0.75f #define ZLRDIM 64 #define ORDER 2 //order of accuracy of interpolation //#define LRFACTOR 0.25f //#define LRLEVEL 4 //#define LRX0 80.125f //minimum x coord of LR //#define XLRDIM 256 //number of nodes in x //#define LRY0 48.125f //#define YLRDIM 256 //#define LRZ0 -0.875f //#define ZLRDIM 128 //#define ORDER 2 //order of accuracy of interpolation #define RE 100.f //3000.f//2000.f//100.f; #define UMAX 0.06f #define SmagLES 0 //1,0 #define MODEL "MRT" //BGK,MRT,STREAM #define REFINEMENT 0 //1,0 #define CS 0.02f #define VELAV 1 #define START_VELAV 100000 #define START_VELFLUC 250000 inline __device__ int ImageFcnLR(float x, float y, float z) { int value = 0; // if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1) // value = 10; // else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2) // value = 10; if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1) { value = 10; } // if(z < 0.5f) // value = 1; // if(z > ZDIM-1-0.5f) // value = 1; return value; } inline __device__ int ImageFcn(int x, int y, int z) { int value = 0; //Cylinder // if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1) // value = 1; // else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2) // value = 10; // if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1) // value = 10; //Lid Driven Cavity if(y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1 || x == XDIM-1) value = 1; // else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2) // return 1; else if(x == 0) return 400; // if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1) // value = 10; // if(z == 1) // value = 1; // if(z == ZDIM-2) // value = 1; // if(y == 0) // value = 1;//200;//22; // else if(y == YDIM-1) // value = 1;//100; // else if(x == 0) // value = 400;//26; // else if(x == XDIM-1) // //else if(x > 42) // value = 300;//25; // else if(z == 0) // value = 1; // //else if(z == ZDIM-1 || z == ZDIM-2) // else if(z > 41) // value = 1; return value; } inline __device__ float PoisProf (float x){ float radius = (YDIM-1-1)*0.5f; float result = -1.5f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f); return (result); } inline __device__ float PoisProf3D (float x, float y){ x = x-0.5f; y = y-0.5f; float H = 41.f; return UMAX;//2.25f*16.f*UMAX*x*y*(H-x)*(H-y)/((H)*(H)*(H)*(H)); // float radius = (YDIM-1-1)*0.5f; // float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f); // return (result); } int timeval_subtract (double *result, struct timeval *x, struct timeval *y) { struct timeval result0; /* Perform the carry for the later subtraction by updating y. 
*/ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (y->tv_usec - x->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. tv_usec is certainly positive. */ result0.tv_sec = x->tv_sec - y->tv_sec; result0.tv_usec = x->tv_usec - y->tv_usec; *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } __device__ int dmin(int a, int b) { if (a<b) return a; else return b-1; } __device__ int dmax(int a) { if (a>-1) return a; else return 0; } __device__ int dmax(int a,int b) { if (a>b) return a; else return b; } __device__ int dmin_p(int a, int b) { if (a<b) return a; else return 0; } __device__ int dmax_p(int a, int b) { if (a>-1) return a; else return b-1; } inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011, float v100, float v101, float v110, float v111, float x, float y, float z){ return v000*(1.f-x)*(1.f-y)*(1.f-z)+ v001*( x)*(1.f-y)*(1.f-z)+ v010*(1.f-x)*( y)*(1.f-z)+ v011*( x)*( y)*(1.f-z)+ v100*(1.f-x)*(1.f-y)*( z)+ v101*( x)*(1.f-y)*( z)+ v110*(1.f-x)*( y)*( z)+ v111*( x)*( y)*( z); } inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner) { int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner); index = dmax(index); index = dmin(index,19*pitch*YDIM*(zInner)); return index; } inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner) { int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner); index = dmax(index); index = dmin(index,19*pitch*YLRDIM*(zInner)); return index; } inline __device__ int f_mem_interp(int m_num, int x, int y, int z, int pitch, int zInner) { int index = (x+y*pitch+z*(YLRDIM*LRFACTOR+1)*pitch)+m_num*pitch*(YLRDIM*LRFACTOR+1)*(zInner); index = dmax(index); index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1)*(zInner)); return index; } inline __device__ int buff_mem_interp(int m_num, int x, int y, int pitch, int zInner) { int index = (x+y*pitch+m_num*(YLRDIM*LRFACTOR+1)*pitch); index = dmax(index); index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1)); return index; } inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch) { int index = (x+y*pitch)+f_num*pitch*YDIM; index = dmax(index); index = dmin(index,19*pitch*YDIM); return index; } inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch) { int index = (x+y*pitch)+f_num*pitch*YLRDIM; index = dmax(index); index = dmin(index,19*pitch*YLRDIM); return index; } inline __device__ void Moments(float* f, float* m) { m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17]; m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ; m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18]; m[7 ] = 
f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; } void Moments_host(float* f, float* m) { m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17]; m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ; m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18]; m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; } void InvertMoments_host(float* f, float* m) { float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + 
-0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } inline __device__ void mrt_meq(float* meq, float rho, float u, float v, float w) { meq[ 0] = rho; meq[ 1] = -11.f*rho+19.f*(u*u+v*v+w*w); meq[ 2] = 7.53968254f*(u*u+v*v+w*w);; meq[ 3] = u; meq[ 4] = -0.666666667f*u; meq[ 5] = v; meq[ 6] = -0.666666667f*v; meq[ 7] = w; meq[ 8] = -0.666666667f*w; meq[ 9] = 2.f*u*u-(v*v+w*w); meq[11] = v*v-w*w; meq[13] = u*v; meq[14] = v*w; meq[15] = u*w; } //outputs physical moments (rho,u,v,w,Pxx,Pww,Pxy,Pyz,Pxz) from f inline 
__device__ void PhysicalMoments(float* mom, float* f) { mom[0] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; mom[1] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17]; mom[2] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; mom[3] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; mom[4] = 2.f*f[1]+-f[2]+2.f*f[3]+-f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+-f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18]; mom[5] = f[2]+f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+-f[10]+-f[12]+-f[14]+-f[15]+-f[17]; mom[6] = f[5]+-f[6]+f[7]+-f[8]; mom[7] = f[11]+-f[13]+-f[16]+f[18]; mom[8] = f[10]+-f[12]+-f[15]+f[17]; } inline __device__ void InvertMoments(float* f, float* m) { float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); 
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } inline __device__ void InvertPhysicalMoments(float* f, float* mom, float SF) { float m[19]={0}; m[ 0] = mom[0]; m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3])); m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]); m[ 3] = mom[1]; m[ 4] = -0.666666667f*mom[1]; m[ 5] = mom[2]; m[ 6] = -0.666666667f*mom[2]; m[ 7] = mom[3]; m[ 8] = -0.666666667f*mom[3]; m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3])); m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]); m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2]; m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3]; m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3]; // InvertMoments(f,m); float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); 
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } //outputs strain rate tensor (Sxx,Syy,Szz,Sxy,Syz,Sxz) from inputs (m0,m3,m5,m7,m9,m11,m13,m14,m15) inline __device__ void StrainRate(float* S, float* m_strain, float omega) { omega = 1.f; float m1 = (-11.f*m_strain[0]+19.f*(m_strain[1]*m_strain[1]+m_strain[2]*m_strain[2]+m_strain[3]*m_strain[3])); float m9 = m_strain[4]; float m11= m_strain[5]; float m13= m_strain[6]; float m14= m_strain[7]; float m15= m_strain[8]; S[0] = -0.026315789f*( m1+19.f*omega* m9); S[1] = -0.013157895f*(2.f*m1-19.f*omega*(m9-3.f*m11)); S[2] = -0.013157895f*(2.f*m1-19.f*omega*(m9+3.f*m11)); S[3] = -1.5f*omega*m13; S[4] = -1.5f*omega*m14; S[5] = -1.5f*omega*m15; } inline __device__ void mrt_collide(float* f, float omega) { float m[19]; //float u,v,w; m[3] = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; m[5] = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; m[7] = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[0] = f[ 0]+f[ 1]+f[ 2]+f[ 3]+f[ 4]+f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[ 9]+ f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[ 1] = 19.f*(-f[ 0]+ f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18] -(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]));//+8.f*(f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18]); m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18] -7.53968254f*(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]); m[ 4] = 1.666666667f*(-3.f*f[1]+3.f*f[ 3]+m[3]); m[ 6] = 1.666666667f*(-3.f*f[2]+3.f*f[ 4]+m[5]); m[ 8] = 1.666666667f*(-3.f*f[9]+3.f*f[14]+m[7]); m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+- f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18] -(2.f*m[3]*m[3]-(m[5]*m[5]+m[7]*m[7])); m[10] =-4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ 
f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+-f[10] +-f[12] +- f[14]+-f[15] +-f[17] -(m[5]*m[5]-m[7]*m[7]); m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+-f[10] +-f[12] + 2.f*f[14]+-f[15] +-f[17] ; m[13] = f[ 5]+-f[ 6]+ f[ 7]+-f[ 8] -m[3]*m[5]; m[14] = f[11] +- f[13] + - f[16] + f[18] -m[5]*m[7]; m[15] = f[10] + - f[12] +-f[15] + f[17] -m[3]*m[7]; m[16] = f[ 5]+-f[ 6]+-f[ 7]+ f[ 8] -f[10] + f[12] +-f[15] + f[17] ; m[17] = -f[ 5]+-f[ 6]+ f[ 7]+ f[ 8] + f[11] +- f[13] + f[16] +- f[18]; m[18] = f[10]+- f[11]+ f[12]+- f[13] +-f[15]+ f[16]+-f[17]+ f[18]; if(SmagLES == 1) { // float Pxx = 0.33333333f*(m[1]+2.f*m[0]+m[9]); // float Pyy = Pxx+0.5f*(m[11]-m[9]);//0.3333333f*(m[1]+2.f*m[0]+0.5f*(3.f*m[11]-m[9])); // float Pzz = Pyy-m[11]; // float Q11 = 0.33333333f*(m[0])+m[3]*m[3]-Pxx; // float Q22 = 0.33333333f*(m[0])+m[5]*m[5]-Pyy; // float Q33 = 0.33333333f*(m[0])+m[7]*m[7]-Pzz; // float Q12 = m[3]*m[5]-m[13]; // float Q23 = m[5]*m[7]-m[14]; // float Q13 = m[3]*m[7]-m[15]; //// float Q11 = 0.33333333f*m[0]+m[3]*m[3]-Pxx; //// float Q22 = 0.33333333f*m[0]+m[5]*m[5]-Pyy; //// float Q33 = 0.33333333f*m[0]+m[7]*m[7]-Pzz; //// float Q12 = 0.33333333f*m[0]+m[3]*m[5]-m[13]; //// float Q23 = 0.33333333f*m[0]+m[5]*m[7]-m[14]; //// float Q13 = 0.33333333f*m[0]+m[3]*m[7]-m[15]; float usqr = m[3]*m[3]+m[5]*m[5]+m[7]*m[7]; float u = m[3]; float v = m[5]; float w = m[7]; float rho = m[0]; float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w 
; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; float PI11 = (f[1 ]-feq1 )+(f[3 ]-feq3 )+(f[5 ]-feq5 )+(f[6 ]-feq6 )+(f[7 ]-feq7 )+(f[8 ]-feq8 )+(f[10]-feq10)+(f[12]-feq12)+(f[15]-feq15)+(f[17]-feq17); float PI22 = (f[2 ]-feq2 )+(f[4 ]-feq4 )+(f[5 ]-feq5 )+(f[6 ]-feq6 )+(f[7 ]-feq7 )+(f[8 ]-feq8 )+(f[11]-feq11)+(f[13]-feq13)+(f[16]-feq16)+(f[18]-feq18); float PI33 = (f[9 ]-feq9 )+(f[14]-feq14)+(f[10]-feq10)+(f[12]-feq12)+(f[15]-feq15)+(f[17]-feq17)+(f[11]-feq11)+(f[13]-feq13)+(f[16]-feq16)+(f[18]-feq18); float PI12 = (f[5 ]-feq5 )+(f[7 ]-feq7 )-(f[6 ]-feq6 )-(f[8 ]-feq8 ); float PI13 = (f[10]-feq10)+(f[17]-feq17)-(f[12]-feq12)-(f[15]-feq15); float PI23 = (f[11]-feq11)+(f[18]-feq18)-(f[13]-feq13)-(f[16]-feq16); float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); //float Q = sqrt(Q11*Q11+Q22*Q22+Q33*Q33+2.f*Q12*Q12+2.f*Q23*Q23+2.f*Q13*Q13); float tau0 = 1.f/omega; float tau = 0.5f*tau0+0.5f*sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q); omega = 1.f/tau; } f[ 0] -=- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]); f[ 1] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]); f[ 2] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]); f[ 3] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]); f[ 4] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]); f[ 5] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))); f[ 6] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))); f[ 7] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))); f[ 8] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))); f[ 9] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]); f[10] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))); f[11] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))); f[12] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))); f[13] -= 
0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))); f[14] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]); f[15] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))); f[16] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))); f[17] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))); f[18] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))); } inline __device__ void North_Extrap(float* f, float rho) { float m[19]; //rho = 1.0f; float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 
2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } inline __device__ void South_Extrap(float* f, float v) { float m[19]; float u = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 
8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 
0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } inline __device__ void East_Extrap(float* f, float rho) { float m[19]; //rho = 0.0f; float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ 
- f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 
0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } inline __device__ void West_Extrap(float* f, float u, int t) { float m[19]; u = 0.f; float v = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float w = UMAX;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; if(t == 1000 || t == 2000 || t == 3000) w = 0.01f; float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] 
=(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } __device__ void xsymmetry_bot(float* f, int y, int z) { if(y == 0 && z == 0){ f[ 2] = f[ 4]; f[13]=f[18]; f[11]=f[18]; f[16]=f[18]; f[ 6] =f[ 7]; f[ 9] =f[14]; f[12]=f[17]; } else if(y == 0 && z == ZDIM-1){ f[ 4] = f[ 2]; f[11]=f[13]; f[18]=f[13]; f[16]=f[13]; f[ 6] =f[ 7]; f[14]=f[ 9]; f[17]=f[12]; } else if(y == YDIM-1 && z == 0){ f[ 4] = f[ 2]; f[11]=f[16]; f[18]=f[16]; f[13]=f[16]; f[ 7] =f[ 6]; f[ 9] =f[14]; f[12]=f[17]; } else if(y == YDIM-1 && z == ZDIM-1){ f[ 4] = f[ 2]; f[16]=f[11]; f[18]=f[11]; f[13]=f[11]; f[ 7] =f[ 6]; f[14]=f[ 9]; f[17]=f[12]; } else{ if(y == 0){ f[ 2] = f[ 4]; f[11]=f[13]; f[16]=f[18]; f[ 8] = f[ 5]; } else if(y == YDIM-1){ f[ 4]=f[ 2] ; f[13]=f[11]; f[18]=f[16]; f[ 5]=f[ 8] ; } } f[ 1] = f[ 3] ; f[ 5] = f[ 6] ; f[ 8] = f[ 7] ; f[10]= f[12]; f[15]= f[17]; } __device__ void xsymmetry_top(float* f, int y, int z) { if(y == 0 && z == 0){ f[ 2] = f[ 4]; f[13] = f[18]; f[11] = f[18]; f[16] = f[18]; f[ 5] = f[ 8]; f[ 9] = f[14]; f[10] = f[15]; } else if(y == 0 && z == ZDIM-1){ f[ 2] = f[ 4]; f[11] = f[13]; f[18] = f[13]; f[16] = f[13]; f[ 5] = f[ 8]; f[14] = f[ 9]; f[15] = f[10]; } else if(y == YDIM-1 && z == 0){ f[ 4] = f[ 2]; f[18] = f[16]; f[11] = f[16]; f[13] = f[16]; f[ 8] = f[ 5]; f[ 9] = f[14]; f[10] = f[15]; 
} else if(y == YDIM-1 && z == ZDIM-1){ f[ 4] = f[ 2]; f[13] = f[11]; f[16] = f[11]; f[18] = f[11]; f[ 8] = f[ 5]; f[14] = f[ 9]; f[15] = f[10]; } else{ if(y == 0){ f[ 2] = f[ 4]; f[11] = f[13]; f[16] = f[18]; f[ 5] = f[ 8]; } else if(y == YDIM-1){ f[ 4] = f[ 2]; f[13] = f[11]; f[18] = f[16]; f[ 8] = f[ 5]; } } f[ 3] = f[ 1] ; f[ 6] = f[ 5] ; f[ 7] = f[ 8] ; f[12]= f[10]; f[17]= f[15]; } inline __device__ void vel_av(float* f, float& uAv, float& vAv, int t) { float u,v;//,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; uAv = (uAv*(t-START_VELAV)+u)/((t-START_VELAV)+1); vAv = (vAv*(t-START_VELAV)+v)/((t-START_VELAV)+1); } inline __device__ void vel_avLR(float* f, float& uAv, float& vAv, float t) { float u,v;//,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; uAv = (uAv*(t-START_VELAV)+u*LRFACTOR)/((t-START_VELAV)+LRFACTOR); vAv = (vAv*(t-START_VELAV)+v*LRFACTOR)/((t-START_VELAV)+LRFACTOR); } inline __device__ void vel_fluc(float* f, float& uAv, float& vAv, float& ufluc, float& vfluc, int t) { float u,v;//,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; u = (u-uAv)*(u-uAv); v = (v-vAv)*(v-vAv); ufluc = (ufluc*(t-START_VELFLUC)+u)/((t-START_VELFLUC)+1); vfluc = (vfluc*(t-START_VELFLUC)+v)/((t-START_VELFLUC)+1); } inline __device__ void vel_flucLR(float* f, float& uAv, float& vAv, float& ufluc, float& vfluc, float t) { float u,v;//,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; u = (u-uAv)*(u-uAv); v = (v-vAv)*(v-vAv); ufluc = (ufluc*(t-START_VELFLUC)+u*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR); vfluc = (vfluc*(t-START_VELFLUC)+v*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR); } __global__ void initialize(float *fout, size_t pitch, int zInner, int GPU_N) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = x; float ycoord = y; float zcoord = z+1+GPU_N*ZDIM; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float f[19] = {0}; float m[19] = {0}; int im = ImageFcn(xcoord,ycoord,zcoord); float u,v,w,rho; rho = 1.f; u = UMAX; v = 0.01f; w = 0.01f; if(im == 10 || im == 1){ u = 0.0f; v = 0.0f; w = 0.0f; } mrt_meq(m,rho,u,v,w); InvertMoments(f,m); for(int i = 0; i<19; i++) fout[j+i *pitch*YDIM*zInner]=f[ i]; } __global__ void initializeLR(float *fout, size_t pitch, int zInner, int GPU_N) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = x; float ycoord = y; float zcoord = z+1+GPU_N*(zInner+2); xcoord = LRX0+x*LRFACTOR; ycoord = LRY0+y*LRFACTOR; zcoord = LRZ0+LRFACTOR*(GPU_N*(zInner+2)+z); int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f[19] = {0}; float m[19] = {0}; int im = ImageFcnLR(xcoord,ycoord,zcoord); float u,v,w,rho; rho = 1.f; u = UMAX; v = 0.0f; w = 0.0f; if(im == 10 || im == 1){ u = 0.0f; v = 0.0f; w = 0.0f; } mrt_meq(m,rho,u,v,w); InvertMoments(f,m); for(int i = 0; i<19; i++) fout[j+i *pitch*YLRDIM*zInner]=f[ i]; } __global__ void update_top(float* hB, float* hA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, 
float* h_interp, size_t pitch_interp) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int j = x+y*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1); float f[19]; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= hA [j]; f[1 ]= hA [buff_mem(1 ,x-1,y ,pitch)]; f[3 ]= hA [buff_mem(3 ,x+1,y ,pitch)]; f[2 ]= hA [buff_mem(2 ,x ,y-1,pitch)]; f[5 ]= hA [buff_mem(5 ,x-1,y-1,pitch)]; f[6 ]= hA [buff_mem(6 ,x+1,y-1,pitch)]; f[4 ]= hA [buff_mem(4 ,x ,y+1,pitch)]; f[7 ]= hA [buff_mem(7 ,x+1,y+1,pitch)]; f[8 ]= hA [buff_mem(8 ,x-1,y+1,pitch)]; f[9 ]= fA [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)]; f[10]= fA [f_mem (10,x-1,y ,zInner-1,pitch, zInner)]; f[11]= fA [f_mem (11,x ,y-1,zInner-1,pitch, zInner)]; f[12]= fA [f_mem (12,x+1,y ,zInner-1,pitch, zInner)]; f[13]= fA [f_mem (13,x ,y+1,zInner-1,pitch, zInner)]; f[14]= temp[buff_mem(14,x ,y ,pitch)]; f[15]= temp[buff_mem(15,x-1,y ,pitch)]; f[16]= temp[buff_mem(16,x ,y-1,pitch)]; f[17]= temp[buff_mem(17,x+1,y ,pitch)]; f[18]= temp[buff_mem(18,x ,y+1,pitch)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } hB[buff_mem(0 ,x,y,pitch)] = f[0 ]; hB[buff_mem(1 ,x,y,pitch)] = f[3 ]; hB[buff_mem(2 ,x,y,pitch)] = f[4 ]; hB[buff_mem(3 ,x,y,pitch)] = f[1 ]; hB[buff_mem(4 ,x,y,pitch)] = f[2 ]; hB[buff_mem(5 ,x,y,pitch)] = f[7 ]; hB[buff_mem(6 ,x,y,pitch)] = f[8 ]; hB[buff_mem(7 ,x,y,pitch)] = f[5 ]; hB[buff_mem(8 ,x,y,pitch)] = f[6 ]; hB[buff_mem(9 ,x,y,pitch)] = f[14]; hB[buff_mem(10,x,y,pitch)] = f[17]; hB[buff_mem(11,x,y,pitch)] = f[18]; hB[buff_mem(12,x,y,pitch)] = f[15]; hB[buff_mem(13,x,y,pitch)] = f[16]; hB[buff_mem(14,x,y,pitch)] = f[9 ]; hB[buff_mem(15,x,y,pitch)] = f[12]; hB[buff_mem(16,x,y,pitch)] = f[13]; hB[buff_mem(17,x,y,pitch)] = f[10]; hB[buff_mem(18,x,y,pitch)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; if(im == 100)//north outlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x,y-1,pitch)]; North_Extrap(f,1.0f); } if(im == 200)//south inlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x,y+1,pitch)]; //South_Extrap(f,UMAX); float u_in = PoisProf3D(x,(GPU+1)*(zInner+2)-1); South_Extrap(f,u_in); } if(im == 300)//east outlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x-1,y,pitch)]; East_Extrap(f,1.0f); } if(im == 400)//west inlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x+1,y,pitch)]; float u_in = PoisProf3D(y,(GPU+1)*(zInner+2)-1); West_Extrap(f,u_in,t); } if(im == 25) xsymmetry_top(f,y,(GPU+1)*(zInner+2)-1); if(im == 26) xsymmetry_bot(f,y,(GPU+1)*(zInner+2)-1); mrt_collide(f,omega); for(int i = 0; i<19; i++) hB[buff_mem(i ,x,y,pitch)] = f[i ]; } if(REFINEMENT == 1){ if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR)) { // if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1)) // { // //do nothing // } // else{ // 
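// Added note: the block below extracts the physical moments (rho,u,v,w,Pxx,Pww,Pxy,Pyz,Pxz) of this
// coarse top-halo node and stores them in h_interp; judging from buff_mem_interp and LRFACTOR, these
// values are what the locally refined (LR) grid later interpolates its boundary state from.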
//float rho,u,v,w,m9,m11,m13,m14,m15; float mom[9]; PhysicalMoments(mom,f); for(int i = 0; i<9; i++) h_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i]; // } } } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_bot(float* gB, float* gA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* g_interp, size_t pitch_interp) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int j = x+y*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,GPU*(zInner+2)); float f[19]; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= gA [j]; f[1 ]= gA [buff_mem(1 ,x-1,y ,pitch)]; f[3 ]= gA [buff_mem(3 ,x+1,y ,pitch)]; f[2 ]= gA [buff_mem(2 ,x ,y-1,pitch)]; f[5 ]= gA [buff_mem(5 ,x-1,y-1,pitch)]; f[6 ]= gA [buff_mem(6 ,x+1,y-1,pitch)]; f[4 ]= gA [buff_mem(4 ,x ,y+1,pitch)]; f[7 ]= gA [buff_mem(7 ,x+1,y+1,pitch)]; f[8 ]= gA [buff_mem(8 ,x-1,y+1,pitch)]; f[9 ]= temp[buff_mem(9 ,x ,y ,pitch)]; f[10]= temp[buff_mem(10,x-1,y ,pitch)]; f[11]= temp[buff_mem(11,x ,y-1,pitch)]; f[12]= temp[buff_mem(12,x+1,y ,pitch)]; f[13]= temp[buff_mem(13,x ,y+1,pitch)]; f[14]= fA [f_mem (14,x ,y ,0,pitch, zInner)]; f[15]= fA [f_mem (15,x-1,y ,0,pitch, zInner)]; f[16]= fA [f_mem (16,x ,y-1,0,pitch, zInner)]; f[17]= fA [f_mem (17,x+1,y ,0,pitch, zInner)]; f[18]= fA [f_mem (18,x ,y+1,0,pitch, zInner)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } gB[buff_mem(0 ,x,y,pitch)] = f[0 ]; gB[buff_mem(1 ,x,y,pitch)] = f[3 ]; gB[buff_mem(2 ,x,y,pitch)] = f[4 ]; gB[buff_mem(3 ,x,y,pitch)] = f[1 ]; gB[buff_mem(4 ,x,y,pitch)] = f[2 ]; gB[buff_mem(5 ,x,y,pitch)] = f[7 ]; gB[buff_mem(6 ,x,y,pitch)] = f[8 ]; gB[buff_mem(7 ,x,y,pitch)] = f[5 ]; gB[buff_mem(8 ,x,y,pitch)] = f[6 ]; gB[buff_mem(9 ,x,y,pitch)] = f[14]; gB[buff_mem(10,x,y,pitch)] = f[17]; gB[buff_mem(11,x,y,pitch)] = f[18]; gB[buff_mem(12,x,y,pitch)] = f[15]; gB[buff_mem(13,x,y,pitch)] = f[16]; gB[buff_mem(14,x,y,pitch)] = f[9 ]; gB[buff_mem(15,x,y,pitch)] = f[12]; gB[buff_mem(16,x,y,pitch)] = f[13]; gB[buff_mem(17,x,y,pitch)] = f[10]; gB[buff_mem(18,x,y,pitch)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; if(im == 100)//north outlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x,y-1,pitch)]; North_Extrap(f,1.0f); } if(im == 200)//south inlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i 
,x,y+1,pitch)]; //South_Extrap(f,UMAX); float u_in = PoisProf3D(x,GPU*(zInner+2)); South_Extrap(f,u_in); } if(im == 300)//east outlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x-1,y,pitch)]; East_Extrap(f,1.0f); } if(im == 400)//west inlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x+1,y,pitch)]; float u_in = PoisProf3D(y,GPU*(zInner+2)); West_Extrap(f,u_in,t); } if(im == 25) xsymmetry_top(f,y,GPU*(zInner+2)); if(im == 26) xsymmetry_bot(f,y,GPU*(zInner+2)); mrt_collide(f,omega); for(int i = 0; i<19; i++) gB[buff_mem(i ,x,y,pitch)] = f[i ]; } if(REFINEMENT == 1){ if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR)) { // if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1)) // { // //do nothing // } // else{ //float rho,u,v,w,m9,m11,m13,m14,m15; float mom[9]; PhysicalMoments(mom,f); for(int i = 0; i<9; i++) g_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i]; // } } } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_inn(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F, float* f_interp, size_t pitch_interp) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,GPU*(zInner+2)+1+z); float f[19]; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[ 0] = fA[j]; f[ 1] = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)]; f[ 3] = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)]; f[ 2] = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)]; f[ 5] = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)]; f[ 6] = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)]; f[ 4] = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)]; f[ 7] = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)]; f[ 8] = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)]; if(z==zInner-1){//top nodes need info from h f[ 9] = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)]; f[10]= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)]; f[11]= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)]; f[12]= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)]; f[13]= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)]; f[14]= h [buff_mem(14,x ,y ,pitch)]; f[15]= h [buff_mem(15,x-1,y ,pitch)]; f[16]= h [buff_mem(16,x ,y-1,pitch)]; f[17]= h [buff_mem(17,x+1,y ,pitch)]; f[18]= h [buff_mem(18,x ,y+1,pitch)]; } else if(z==0){//bottom nodes need info from g f[ 9] =g [buff_mem(9 ,x ,y ,pitch)]; f[10]= g [buff_mem(10,x-1,y ,pitch)]; f[11]= g [buff_mem(11,x ,y-1,pitch)]; f[12]= g [buff_mem(12,x+1,y ,pitch)]; f[13]= g [buff_mem(13,x ,y+1,pitch)]; f[14]= fA[f_mem (14,x ,y ,z+1,pitch, zInner)]; f[15]= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)]; f[16]= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)]; f[17]= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)]; f[18]= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)]; } 
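// Interior z-planes (0 < z < zInner-1): every neighbor distribution is available in the bulk
// array fA, so neither the bottom halo slice g nor the top halo slice h needs to be read.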
else{//normal nodes f[ 9] = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)]; f[10]= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)]; f[11]= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)]; f[12]= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)]; f[13]= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)]; f[14]= fA[f_mem(14,x ,y ,z+1,pitch,zInner)]; f[15]= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)]; f[16]= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)]; f[17]= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)]; f[18]= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)]; }//end normal nodes if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } fB[f_mem(1 ,x,y,z,pitch,zInner)] = f[ 3] ; fB[f_mem(2 ,x,y,z,pitch,zInner)] = f[ 4] ; fB[f_mem(3 ,x,y,z,pitch,zInner)] = f[ 1] ; fB[f_mem(4 ,x,y,z,pitch,zInner)] = f[ 2] ; fB[f_mem(5 ,x,y,z,pitch,zInner)] = f[ 7] ; fB[f_mem(6 ,x,y,z,pitch,zInner)] = f[ 8] ; fB[f_mem(7 ,x,y,z,pitch,zInner)] = f[ 5] ; fB[f_mem(8 ,x,y,z,pitch,zInner)] = f[ 6] ; fB[f_mem(9 ,x,y,z,pitch,zInner)] = f[14]; fB[f_mem(10,x,y,z,pitch,zInner)] = f[17]; fB[f_mem(11,x,y,z,pitch,zInner)] = f[18]; fB[f_mem(12,x,y,z,pitch,zInner)] = f[15]; fB[f_mem(13,x,y,z,pitch,zInner)] = f[16]; fB[f_mem(14,x,y,z,pitch,zInner)] = f[ 9] ; fB[f_mem(15,x,y,z,pitch,zInner)] = f[12]; fB[f_mem(16,x,y,z,pitch,zInner)] = f[13]; fB[f_mem(17,x,y,z,pitch,zInner)] = f[10]; fB[f_mem(18,x,y,z,pitch,zInner)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; if(im == 100)//north outlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)]; North_Extrap(f,1.0f); } if(im == 200)//south inlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x,y+1,z,pitch,zInner)]; //South_Extrap(f,UMAX); float u_in = PoisProf3D(x,GPU*(zInner+2)+1+z); South_Extrap(f,u_in); } if(im == 300)//east outlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x-1,y,z,pitch,zInner)]; East_Extrap(f,1.0f); } if(im == 400)//west inlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x+1,y,z,pitch,zInner)]; float u_in = PoisProf3D(y,GPU*(zInner+2)+1+z); West_Extrap(f,u_in,t); } if(im == 25) xsymmetry_top(f,y,GPU*(zInner+2)+1+z); if(im == 26) xsymmetry_bot(f,y,GPU*(zInner+2)+1+z); mrt_collide(f,omega); if(VELAV == 1){ if(t>=START_VELAV && t<START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM]; vel_av(f,u_Av,v_Av,t); velAv_u[x+y*pitch+(z+1)*pitch*YDIM] = u_Av; velAv_v[x+y*pitch+(z+1)*pitch*YDIM] = v_Av; } else if(t>=START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM]; float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YDIM]; float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YDIM]; vel_fluc(f,u_Av,v_Av,u_fluc,v_fluc,t); velFluc_u[x+y*pitch+(z+1)*pitch*YDIM] = u_fluc; velFluc_v[x+y*pitch+(z+1)*pitch*YDIM] = v_fluc; } } for(int i = 0; i<19; i++) fB[f_mem(i ,x,y,z,pitch,zInner)] = f[ i] ; } if(REFINEMENT == 1){ if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR)) { // 
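// inside the overlap with the refined region, store this coarse node's physical moments in
// f_interp so InterpCF can later interpolate them onto the fine mesh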
if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1)) // { // //do nothing // } // else{ //float rho,u,v,w,m9,m11,m13,m14,m15; float mom[9]; PhysicalMoments(mom,f); for(int i = 0; i<9; i++) f_interp[f_mem_interp(i,x-int(LRX0),y-int(LRY0),z,pitch_interp,zInner)]=mom[i]; // } } } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_top_LR(float* hB, float* hA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region int j = x+y*pitch;//index on padded mem (pitch in elements) float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+LRFACTOR*z; int im = ImageFcnLR(xcoord,ycoord,zcoord); float f[19]; __shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= hA [j]; f[1 ]= hA [buff_memLR(1 ,x-1,y ,pitch)]; f[3 ]= hA [buff_memLR(3 ,x+1,y ,pitch)]; f[2 ]= hA [buff_memLR(2 ,x ,y-1,pitch)]; f[5 ]= hA [buff_memLR(5 ,x-1,y-1,pitch)]; f[6 ]= hA [buff_memLR(6 ,x+1,y-1,pitch)]; f[4 ]= hA [buff_memLR(4 ,x ,y+1,pitch)]; f[7 ]= hA [buff_memLR(7 ,x+1,y+1,pitch)]; f[8 ]= hA [buff_memLR(8 ,x-1,y+1,pitch)]; f[9 ]= fA [ f_memLR(9 ,x ,y ,zInner-1,pitch, zInner)]; f[10]= fA [ f_memLR(10,x-1,y ,zInner-1,pitch, zInner)]; f[11]= fA [ f_memLR(11,x ,y-1,zInner-1,pitch, zInner)]; f[12]= fA [ f_memLR(12,x+1,y ,zInner-1,pitch, zInner)]; f[13]= fA [ f_memLR(13,x ,y+1,zInner-1,pitch, zInner)]; f[14]= temp[buff_memLR(14,x ,y ,pitch)]; f[15]= temp[buff_memLR(15,x-1,y ,pitch)]; f[16]= temp[buff_memLR(16,x ,y-1,pitch)]; f[17]= temp[buff_memLR(17,x+1,y ,pitch)]; f[18]= temp[buff_memLR(18,x ,y+1,pitch)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } hB[buff_memLR(0 ,x,y,pitch)] = f[0 ]; hB[buff_memLR(1 ,x,y,pitch)] = f[3 ]; hB[buff_memLR(2 ,x,y,pitch)] = f[4 ]; hB[buff_memLR(3 ,x,y,pitch)] = f[1 ]; hB[buff_memLR(4 ,x,y,pitch)] = f[2 ]; hB[buff_memLR(5 ,x,y,pitch)] = f[7 ]; hB[buff_memLR(6 ,x,y,pitch)] = f[8 ]; hB[buff_memLR(7 ,x,y,pitch)] = f[5 ]; hB[buff_memLR(8 ,x,y,pitch)] = f[6 ]; hB[buff_memLR(9 ,x,y,pitch)] = f[14]; hB[buff_memLR(10,x,y,pitch)] = f[17]; hB[buff_memLR(11,x,y,pitch)] = f[18]; hB[buff_memLR(12,x,y,pitch)] = f[15]; hB[buff_memLR(13,x,y,pitch)] = f[16]; hB[buff_memLR(14,x,y,pitch)] = f[9 ]; hB[buff_memLR(15,x,y,pitch)] = f[12]; 
hB[buff_memLR(16,x,y,pitch)] = f[13]; hB[buff_memLR(17,x,y,pitch)] = f[10]; hB[buff_memLR(18,x,y,pitch)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; mrt_collide(f,omega); for(int i = 0; i<19; i++) hB[buff_memLR(i ,x,y,pitch)] = f[i ]; } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_bot_LR(float* gB, float* gA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; //int z = (zInner+2)-1; int j = x+y*pitch;//index on padded mem (pitch in elements) float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; //float zcoord = LRZ0+GPU*LRFACTOR*z; float zcoord = LRZ0+LRFACTOR*(GPU*(zInner+2)-1); int im = ImageFcnLR(xcoord,ycoord,zcoord); float f[19]; __shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= gA [j]; f[1 ]= gA [buff_memLR(1 ,x-1,y ,pitch)]; f[3 ]= gA [buff_memLR(3 ,x+1,y ,pitch)]; f[2 ]= gA [buff_memLR(2 ,x ,y-1,pitch)]; f[5 ]= gA [buff_memLR(5 ,x-1,y-1,pitch)]; f[6 ]= gA [buff_memLR(6 ,x+1,y-1,pitch)]; f[4 ]= gA [buff_memLR(4 ,x ,y+1,pitch)]; f[7 ]= gA [buff_memLR(7 ,x+1,y+1,pitch)]; f[8 ]= gA [buff_memLR(8 ,x-1,y+1,pitch)]; f[9 ]= temp[buff_memLR(9 ,x ,y ,pitch)]; f[10]= temp[buff_memLR(10,x-1,y ,pitch)]; f[11]= temp[buff_memLR(11,x ,y-1,pitch)]; f[12]= temp[buff_memLR(12,x+1,y ,pitch)]; f[13]= temp[buff_memLR(13,x ,y+1,pitch)]; f[14]= fA [ f_memLR(14,x ,y ,0,pitch, zInner)]; f[15]= fA [ f_memLR(15,x-1,y ,0,pitch, zInner)]; f[16]= fA [ f_memLR(16,x ,y-1,0,pitch, zInner)]; f[17]= fA [ f_memLR(17,x+1,y ,0,pitch, zInner)]; f[18]= fA [ f_memLR(18,x ,y+1,0,pitch, zInner)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } gB[buff_memLR(0 ,x,y,pitch)] = f[0 ]; gB[buff_memLR(1 ,x,y,pitch)] = f[3 ]; gB[buff_memLR(2 ,x,y,pitch)] = f[4 ]; gB[buff_memLR(3 ,x,y,pitch)] = f[1 ]; gB[buff_memLR(4 ,x,y,pitch)] = f[2 ]; gB[buff_memLR(5 ,x,y,pitch)] = f[7 ]; gB[buff_memLR(6 ,x,y,pitch)] = f[8 ]; gB[buff_memLR(7 ,x,y,pitch)] = f[5 ]; gB[buff_memLR(8 ,x,y,pitch)] = f[6 ]; gB[buff_memLR(9 ,x,y,pitch)] = f[14]; gB[buff_memLR(10,x,y,pitch)] = f[17]; gB[buff_memLR(11,x,y,pitch)] = f[18]; gB[buff_memLR(12,x,y,pitch)] = f[15]; gB[buff_memLR(13,x,y,pitch)] = f[16]; gB[buff_memLR(14,x,y,pitch)] = f[9 ]; gB[buff_memLR(15,x,y,pitch)] = f[12]; gB[buff_memLR(16,x,y,pitch)] = f[13]; gB[buff_memLR(17,x,y,pitch)] = f[10]; 
gB[buff_memLR(18,x,y,pitch)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; mrt_collide(f,omega); for(int i = 0; i<19; i++) gB[buff_memLR(i ,x,y,pitch)] = f[i ]; } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_inn_LR(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z)); float f[19]; __shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[ 0] = fA[j]; f[ 1] = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)]; f[ 3] = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)]; f[ 2] = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)]; f[ 5] = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)]; f[ 6] = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)]; f[ 4] = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)]; f[ 7] = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)]; f[ 8] = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)]; if(z==zInner-1){//top nodes need info from h f[ 9] =fA[ f_memLR(9 ,x ,y ,z-1,pitch, zInner)]; f[10]= fA[ f_memLR(10,x-1,y ,z-1,pitch, zInner)]; f[11]= fA[ f_memLR(11,x ,y-1,z-1,pitch, zInner)]; f[12]= fA[ f_memLR(12,x+1,y ,z-1,pitch, zInner)]; f[13]= fA[ f_memLR(13,x ,y+1,z-1,pitch, zInner)]; f[14]= h [buff_memLR(14,x ,y ,pitch)]; f[15]= h [buff_memLR(15,x-1,y ,pitch)]; f[16]= h [buff_memLR(16,x ,y-1,pitch)]; f[17]= h [buff_memLR(17,x+1,y ,pitch)]; f[18]= h [buff_memLR(18,x ,y+1,pitch)]; } else if(z==0){//bottom nodes need info from g f[ 9] =g [buff_memLR(9 ,x ,y ,pitch)]; f[10]= g [buff_memLR(10,x-1,y ,pitch)]; f[11]= g [buff_memLR(11,x ,y-1,pitch)]; f[12]= g [buff_memLR(12,x+1,y ,pitch)]; f[13]= g [buff_memLR(13,x ,y+1,pitch)]; f[14]= fA[ f_memLR(14,x ,y ,z+1,pitch, zInner)]; f[15]= fA[ f_memLR(15,x-1,y ,z+1,pitch, zInner)]; f[16]= fA[ f_memLR(16,x ,y-1,z+1,pitch, zInner)]; f[17]= fA[ f_memLR(17,x+1,y ,z+1,pitch, zInner)]; f[18]= fA[ f_memLR(18,x ,y+1,z+1,pitch, zInner)]; } else{//normal nodes f[ 9] =fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)]; f[10]= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)]; f[11]= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)]; f[12]= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)]; f[13]= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)]; f[14]= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)]; f[15]= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)]; f[16]= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)]; f[17]= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)]; f[18]= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)]; }//end normal nodes if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; 
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f[ 3] ; fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f[ 4] ; fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f[ 1] ; fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f[ 2] ; fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f[ 7] ; fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f[ 8] ; fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f[ 5] ; fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f[ 6] ; fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f[14]; fB[f_memLR(10,x,y,z,pitch,zInner)] = f[17]; fB[f_memLR(11,x,y,z,pitch,zInner)] = f[18]; fB[f_memLR(12,x,y,z,pitch,zInner)] = f[15]; fB[f_memLR(13,x,y,z,pitch,zInner)] = f[16]; fB[f_memLR(14,x,y,z,pitch,zInner)] = f[ 9] ; fB[f_memLR(15,x,y,z,pitch,zInner)] = f[12]; fB[f_memLR(16,x,y,z,pitch,zInner)] = f[13]; fB[f_memLR(17,x,y,z,pitch,zInner)] = f[10]; fB[f_memLR(18,x,y,z,pitch,zInner)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; mrt_collide(f,omega); if(VELAV == 1){ if(t>=START_VELAV && t<START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM]; vel_avLR(f,u_Av,v_Av,t); velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av; velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av; } else if(t>=START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM]; float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM]; float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM]; vel_flucLR(f,u_Av,v_Av,u_fluc,v_fluc,t); velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc; velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc; } } for(int i = 0; i<19; i++) fB[f_memLR(i ,x,y,z,pitch,zInner)] = f[ i] ; } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } /* InterpCF is used on the LR grid. It first uses part of its threads to read from the coarse mesh nodes that completely envelope the fine mesh nodes, and loads the f's into shared memory. 
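For each of the nine physical moments the eight surrounding coarse values are combined with a
trilinear blend. The trilinear_interp helper is defined earlier in this file; the sketch below is
only the conventional form it is assumed to take (the v000..v111 corner naming and the xf,yf,zf
fractions match the call site in this kernel):

  __device__ float trilinear_interp(float v000, float v001, float v010, float v011,
                                    float v100, float v101, float v110, float v111,
                                    float xf, float yf, float zf)
  {
    float c00 = v000*(1.f-xf) + v001*xf;   // z=0, y=0 edge, blended along x
    float c10 = v010*(1.f-xf) + v011*xf;   // z=0, y=1 edge
    float c01 = v100*(1.f-xf) + v101*xf;   // z=1, y=0 edge
    float c11 = v110*(1.f-xf) + v111*xf;   // z=1, y=1 edge
    float c0  = c00*(1.f-yf) + c10*yf;     // z=0 face, blended along y
    float c1  = c01*(1.f-yf) + c11*yf;     // z=1 face
    return c0*(1.f-zf) + c1*zf;            // final blend along z
  }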
Next, all threads use the shared memory data to interpolate and scale the f's */ __global__ void InterpCF(float* f_f, float* g_f, float* h_f, size_t pitch_f, float* m_f_c, float* m_g_c, float* m_h_c, float* m_g_temp, size_t pitch_m, float SF, float omega_c, int GPU, int zInner, int zInner_f) { int x = threadIdx.x+blockIdx.x*blockDim.x; int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; __shared__ float mom_c[BLOCKSIZEINTERP][2][2][9]; __shared__ float S_c[BLOCKSIZEINTERP][2][2][6]; //int GPU = 0; int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner_f+2)+z)); if(blockIdx.z == 0 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2) { //use g and g_temp int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int ymax = YLRDIM*LRFACTOR+1; if(threadIdx.z == 0){ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_temp[x_c+y_c*pitch_m+i*ymax*pitch_m]; } else{ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m]; } // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c); } else if(blockIdx.z == 1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2) { //use g and f int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int ymax = YLRDIM*LRFACTOR+1; if(threadIdx.z == 0){ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m]; } else{ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+i*ymax*pitch_m*zInner]; } // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c); } else if(blockIdx.z == zInner+1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2) { //use h and f int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int ymax = YLRDIM*LRFACTOR+1; if(threadIdx.z == 0){ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+(zInner-1)*ymax*pitch_m+i*ymax*pitch_m*zInner]; } else{ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_h_c[x_c+y_c*pitch_m+i*ymax*pitch_m]; } // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c); } else if(threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2){//use f only int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in 
coarse grid, blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int z_c = threadIdx.z+blockIdx.z-2;//in coarse grid, blockdim.z is 1; -2 to account for g and lower halo int ymax = YLRDIM*LRFACTOR+1; for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+z_c*ymax*pitch_m+i*ymax*pitch_m*zInner]; // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c); } syncthreads(); if(x<LRLEVEL || x>XLRDIM-LRLEVEL-1 || y<LRLEVEL || y>YLRDIM-LRLEVEL-1){ //if(x<LRLEVEL || x>XLRDIM-LRLEVEL-2 || y<LRLEVEL || y>YLRDIM-LRLEVEL-2){ //interpolate from shared mem int xm = int(threadIdx.x*LRFACTOR+LRFACTOR*0.5f); int ym = int(threadIdx.y*LRFACTOR+LRFACTOR*0.5f); int zm = int(threadIdx.z*LRFACTOR+LRFACTOR*0.5f); int xp = xm+1; //int yp = ym+1; int zp = zm+1; float xf = (threadIdx.x*LRFACTOR+LRFACTOR*0.5f)-xm; float yf = (threadIdx.y*LRFACTOR+LRFACTOR*0.5f)-ym; float zf = (threadIdx.z*LRFACTOR+LRFACTOR*0.5f)-zm; float mom[9]; for(int i = 0; i<9; i++){ float v000 = mom_c[xm][0][0][i]; float v001 = mom_c[xp][0][0][i]; float v010 = mom_c[xm][1][0][i]; float v011 = mom_c[xp][1][0][i]; float v100 = mom_c[xm][0][1][i]; float v101 = mom_c[xp][0][1][i]; float v110 = mom_c[xm][1][1][i]; float v111 = mom_c[xp][1][1][i]; mom[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf); } if(ORDER == 2) { float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8; float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8; float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8; float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8; float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8; float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8; u_x1=S_c[xm][0][0][0];v_y1=S_c[xm][0][0][1];w_z1=S_c[xm][0][0][2];Sxy1=S_c[xm][0][0][3];Syz1=S_c[xm][0][0][4];Sxz1=S_c[xm][0][0][5]; u_x2=S_c[xp][0][0][0];v_y2=S_c[xp][0][0][1];w_z2=S_c[xp][0][0][2];Sxy2=S_c[xp][0][0][3];Syz2=S_c[xp][0][0][4];Sxz2=S_c[xp][0][0][5]; u_x3=S_c[xm][1][0][0];v_y3=S_c[xm][1][0][1];w_z3=S_c[xm][1][0][2];Sxy3=S_c[xm][1][0][3];Syz3=S_c[xm][1][0][4];Sxz3=S_c[xm][1][0][5]; u_x4=S_c[xp][1][0][0];v_y4=S_c[xp][1][0][1];w_z4=S_c[xp][1][0][2];Sxy4=S_c[xp][1][0][3];Syz4=S_c[xp][1][0][4];Sxz4=S_c[xp][1][0][5]; u_x5=S_c[xm][0][1][0];v_y5=S_c[xm][0][1][1];w_z5=S_c[xm][0][1][2];Sxy5=S_c[xm][0][1][3];Syz5=S_c[xm][0][1][4];Sxz5=S_c[xm][0][1][5]; u_x6=S_c[xp][0][1][0];v_y6=S_c[xp][0][1][1];w_z6=S_c[xp][0][1][2];Sxy6=S_c[xp][0][1][3];Syz6=S_c[xp][0][1][4];Sxz6=S_c[xp][0][1][5]; u_x7=S_c[xm][1][1][0];v_y7=S_c[xm][1][1][1];w_z7=S_c[xm][1][1][2];Sxy7=S_c[xm][1][1][3];Syz7=S_c[xm][1][1][4];Sxz7=S_c[xm][1][1][5]; u_x8=S_c[xp][1][1][0];v_y8=S_c[xp][1][1][1];w_z8=S_c[xp][1][1][2];Sxy8=S_c[xp][1][1][3];Syz8=S_c[xp][1][1][4];Sxz8=S_c[xp][1][1][5]; float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77; m03=mom_c[xm][0][0][1];m05=mom_c[xm][0][0][2];m07=mom_c[xm][0][0][3]; m13=mom_c[xp][0][0][1];m15=mom_c[xp][0][0][2];m17=mom_c[xp][0][0][3]; m23=mom_c[xm][1][0][1];m25=mom_c[xm][1][0][2];m27=mom_c[xm][1][0][3]; m33=mom_c[xp][1][0][1];m35=mom_c[xp][1][0][2];m37=mom_c[xp][1][0][3]; m43=mom_c[xm][0][1][1];m45=mom_c[xm][0][1][2];m47=mom_c[xm][0][1][3]; m53=mom_c[xp][0][1][1];m55=mom_c[xp][0][1][2];m57=mom_c[xp][0][1][3]; 
m63=mom_c[xm][1][1][1];m65=mom_c[xm][1][1][2];m67=mom_c[xm][1][1][3]; m73=mom_c[xp][1][1][1];m75=mom_c[xp][1][1][2];m77=mom_c[xp][1][1][3]; float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f; float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f; float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f; float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f; float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f; float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f; float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f; float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f; float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f; float xpr = 4.f*xf*xf-4.f*xf+1.f; float ypr = 4.f*yf*yf-4.f*yf+1.f; float zpr = 4.f*zf*zf-4.f*zf+1.f; mom[1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr); mom[2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr); mom[3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr); } float f[19]; InvertPhysicalMoments(f,mom,SF); if(im != 1 && im != 10){ if(z==0){ for(int i = 0; i<19; i++){ g_f[buff_memLR(i,x,y,pitch_f)]=f[i]; } } else if(z==gridDim.z*blockDim.z-1){ for(int i = 0; i<19; i++){ h_f[buff_memLR(i,x,y,pitch_f)]=f[i]; } } else{ for(int i = 0; i<19; i++){ f_f[f_memLR(i,x,y,z-1,pitch_f,zInner_f)]=f[i]; } } } } } __global__ void InterpFC(float* f_c, float* g_c, float* h_c, float* f_f, float* h_f, float* temp_f, size_t pitch_c, size_t pitch_f, float SF, float omega_f, int GPU, int zInner, int zInner_f) { int x = threadIdx.x+blockIdx.x*blockDim.x; int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; //if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) && //(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2))) //(true)) if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-2 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-2) && (x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2))) //(true)) { float f[19]; float mom[8][9];//physical moments of 8 neighboring nodes float S_f[8][6];//strain rate tensor of 8 neighboring nodes int xm = LRLEVEL*(x-LRX0); int ym = LRLEVEL*(y-LRY0); int zm = LRLEVEL*(z-(-(1.f-0.5f*LRFACTOR)))-1;//LRZ0=-(1.f-0.5f*LRFACTOR), and -1 to account for g_LR int xp = xm+1; int yp = ym+1; int zp = zm+1; //top nodes. interp between h and h_temp. 
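// fine-to-coarse restriction: gather the 8 fine-mesh nodes surrounding this coarse node, convert
// each to physical moments and strain rates, then average them (ORDER==1) or apply the
// second-order correction (ORDER==2) before rescaling with SF and writing back to the coarse arrays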
output to h if(z == zInner+1) { for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xm,ym,pitch_f)]; PhysicalMoments(mom[0],f); StrainRate(S_f[0],mom[0],omega_f); for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xp,ym,pitch_f)]; PhysicalMoments(mom[1],f); StrainRate(S_f[1],mom[1],omega_f); for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xm,yp,pitch_f)]; PhysicalMoments(mom[2],f); StrainRate(S_f[2],mom[2],omega_f); for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xp,yp,pitch_f)]; PhysicalMoments(mom[3],f); StrainRate(S_f[3],mom[3],omega_f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xm,ym,pitch_f)]; PhysicalMoments(mom[4],f); StrainRate(S_f[4],mom[4],omega_f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xp,ym,pitch_f)]; PhysicalMoments(mom[5],f); StrainRate(S_f[5],mom[5],omega_f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xm,yp,pitch_f)]; PhysicalMoments(mom[6],f); StrainRate(S_f[6],mom[6],omega_f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xp,yp,pitch_f)]; PhysicalMoments(mom[7],f); StrainRate(S_f[7],mom[7],omega_f); } //inner nodes. output to g or f else{ for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,ym,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[0],f); StrainRate(S_f[0],mom[0],omega_f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,ym,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[1],f); StrainRate(S_f[1],mom[1],omega_f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,yp,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[2],f); StrainRate(S_f[2],mom[2],omega_f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,yp,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[3],f); StrainRate(S_f[3],mom[3],omega_f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,ym,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[4],f); StrainRate(S_f[4],mom[4],omega_f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,ym,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[5],f); StrainRate(S_f[5],mom[5],omega_f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,yp,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[6],f); StrainRate(S_f[6],mom[6],omega_f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,yp,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[7],f); StrainRate(S_f[7],mom[7],omega_f); } if(ORDER == 1){ for(int i = 0; i<9; i++) mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]); } else if(ORDER == 2) { float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8; float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8; float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8; float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8; float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8; float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8; u_x1=S_f[0][0];v_y1=S_f[0][1];w_z1=S_f[0][2];Sxy1=S_f[0][3];Syz1=S_f[0][4];Sxz1=S_f[0][5]; u_x2=S_f[1][0];v_y2=S_f[1][1];w_z2=S_f[1][2];Sxy2=S_f[1][3];Syz2=S_f[1][4];Sxz2=S_f[1][5]; u_x3=S_f[2][0];v_y3=S_f[2][1];w_z3=S_f[2][2];Sxy3=S_f[2][3];Syz3=S_f[2][4];Sxz3=S_f[2][5]; u_x4=S_f[3][0];v_y4=S_f[3][1];w_z4=S_f[3][2];Sxy4=S_f[3][3];Syz4=S_f[3][4];Sxz4=S_f[3][5]; u_x5=S_f[4][0];v_y5=S_f[4][1];w_z5=S_f[4][2];Sxy5=S_f[4][3];Syz5=S_f[4][4];Sxz5=S_f[4][5]; u_x6=S_f[5][0];v_y6=S_f[5][1];w_z6=S_f[5][2];Sxy6=S_f[5][3];Syz6=S_f[5][4];Sxz6=S_f[5][5]; u_x7=S_f[6][0];v_y7=S_f[6][1];w_z7=S_f[6][2];Sxy7=S_f[6][3];Syz7=S_f[6][4];Sxz7=S_f[6][5]; u_x8=S_f[7][0];v_y8=S_f[7][1];w_z8=S_f[7][2];Sxy8=S_f[7][3];Syz8=S_f[7][4];Sxz8=S_f[7][5]; float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77; m03=mom[0][1];m05=mom[0][2];m07=mom[0][3]; 
m13=mom[1][1];m15=mom[1][2];m17=mom[1][3]; m23=mom[2][1];m25=mom[2][2];m27=mom[2][3]; m33=mom[3][1];m35=mom[3][2];m37=mom[3][3]; m43=mom[4][1];m45=mom[4][2];m47=mom[4][3]; m53=mom[5][1];m55=mom[5][2];m57=mom[5][3]; m63=mom[6][1];m65=mom[6][2];m67=mom[6][3]; m73=mom[7][1];m75=mom[7][2];m77=mom[7][3]; float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f; float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f; float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f; float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f; float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f; float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f; float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f; float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f; float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f; for(int i = 0; i<9; i++) mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]); float xpr = 0.f;//4.f*xf*xf-4.f*xf+1.f; float ypr = 0.f;//4.f*yf*yf-4.f*yf+1.f; float zpr = 0.f;//4.f*zf*zf-4.f*zf+1.f; mom[0][1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr); mom[0][2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr); mom[0][3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr); } InvertPhysicalMoments(f,mom[0],SF); //for(int i = 0; i<19; i++) f[i] = 0.1f; //int GPU = 0; int im = ImageFcn(x,y,GPU*(zInner+2)+z); if(im != 1 && im != 10){ if(z == 0){ for(int i = 0; i<19; i++) g_c[buff_mem(i,x,y,pitch_c)]=f[i]; } else if(z == zInner+1){ for(int i = 0; i<19; i++) h_c[buff_mem(i,x,y,pitch_c)]=f[i]; } else{ for(int i = 0; i<19; i++) f_c[f_mem(i,x,y,z-1,pitch_c,zInner)]=f[i]; } } }//end extraction region } void WriteResults(ostream &output, float *fin, float *gin, float *hin, float **velAv, float **velFluc, float omega, int GPU_N, int GPU) { float f[19]; output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\"\n"; output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM/GPU_N<<"\n"; for(int j = 0; j<YDIM; j++){ for(int i = 0; i<XDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = gin[(i+j*XDIM)+l *XDIM*YDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*GPU)<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XDIM]<<","<<velAv[1][i+j*XDIM]<<", "<<velFluc[0][i+j*XDIM]<<","<<velFluc[1][i+j*XDIM]<<endl; }} for(int k = 1; k<ZDIM/GPU_N-1; k++){ for(int j = 0; j<YDIM; j++){ for(int i = 0; i<XDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = fin[(i+j*XDIM)+(k-1)*XDIM*YDIM+l*XDIM*YDIM*(ZDIM/GPU_N-2)]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float m1 =-30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+8.f*f[5]+8.f*f[6]+8.f*f[7]+8.f*f[8]+-11.f*f[9]+8.f*f[10]+8.f*f[11]+8.f*f[12]+8.f*f[13]+-11.f*f[14]+8.f*f[15]+8.f*f[16]+8.f*f[17]+8.f*f[18]; //float m6 = -4.f*f[2]+4.f*f[4]+f[5]+f[6]+-f[7]+-f[8]+f[11]+-f[13]+f[16]+-f[18]; float m10 
=-4.f*f[1]+2.f*f[2]+-4.f*f[3]+2.f*f[4]+f[5]+f[6]+f[7]+f[8]+2.f*f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+2.f*f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18]; float m16 = f[5]+-f[6]+-f[7]+f[8]-f[10]+f[12]+-f[15]+f[17]; float m[19] = {0}; Moments_host(f,m); float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f); //float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); float PI11 = -0.026315789f*m[ 1]-0.5f *omega*m[ 9]; float PI22 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]-3.0f*m[11]); float PI33 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]+3.0f*m[11]); float PI12 = -1.5f*omega*m[13]; float PI23 = -1.5f*omega*m[14]; float PI13 = -1.5f*omega*m[15]; //we know Smag on coarse mesh float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); //InvertMoments_host(f,m); //u = m[3]; //v = m[5]; //w = m[7]; //m6 = m[6 ]; //m10= m[10]; //m16= m[16]; int z = (ZDIM/GPU_N*GPU+k); output<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", " <<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl; //<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<endl; }}} for(int j = 0; j<YDIM; j++){ for(int i = 0; i<XDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = hin[(i+j*XDIM)+l *XDIM*YDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*(GPU+1)-1)<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XDIM+(ZDIM-1)*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<", " <<velFluc[0][i+j*XDIM+(ZDIM-1)*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<endl; }} } void WriteResultsLR(ofstream &output, float *fin, float *gin, float *hin, float **velAv, float **velFluc, float omega, int GPU_N, int GPU) { float f[19]; output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\"\n"; output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM/GPU_N<<"\n"; for(int j = 0; j<YLRDIM; j++){ for(int i = 0; i<XLRDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = gin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float x = LRX0+LRFACTOR*i; float y = LRY0+LRFACTOR*j; float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU); output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XLRDIM]<<","<<velAv[1][i+j*XLRDIM]<<", "<<velFluc[0][i+j*XLRDIM]<<","<<velFluc[1][i+j*XLRDIM]<<endl; }} for(int k = 1; k<ZLRDIM/GPU_N-1; k++){ for(int j = 0; j<YLRDIM; j++){ for(int i = 0; i<XLRDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = fin[(i+j*XLRDIM)+(k-1)*XLRDIM*YLRDIM+l*XLRDIM*YLRDIM*(ZLRDIM/GPU_N-2)]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float x = LRX0+LRFACTOR*i; float y = LRY0+LRFACTOR*j; float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU+k); float m[19] = {0}; Moments_host(f,m); float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f); //float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); float PI11 = -0.026315789f*m[ 
1]-0.5f *omega*m[ 9]; float PI22 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]-3.0f*m[11]); float PI33 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]+3.0f*m[11]); float PI12 = -1.5f*omega*m[13]; float PI23 = -1.5f*omega*m[14]; float PI13 = -1.5f*omega*m[15]; //we know Smag on coarse mesh float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", " <<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl; //<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<endl; }}} for(int j = 0; j<YLRDIM; j++){ for(int i = 0; i<XLRDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = hin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float x = LRX0+LRFACTOR*i; float y = LRY0+LRFACTOR*j; float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*(GPU+1)-1); output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velAv[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<", " <<velFluc[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<endl; }} } void WriteForces(float **F, ofstream &output, int ForceTime, int level) { float ref = UMAX*UMAX*ZDIM*OBSTR1; if(level > 0) ref *= LRLEVEL*LRLEVEL; for(int i = 0; i<ForceTime; i++){ output<<i+STARTF<<", "<<F[0][i]/ref<<", "<<F[1][i]/ref<<", "<<F[2][i]/ref<<endl; } } void WriteInputs(ostream &output, float omega, float omegaLR, int GPU_per_node) { output<<"Base domain size \t"<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl; output<<"Base blocksize: \t"<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl; output<<"Obst1 location: \t("<<OBSTX1<<","<<OBSTY1<<","<<OBSTZ1<<")"<<endl; output<<"Obst1 radius: \t"<<OBSTR1<<endl; output<<"Obst2 location: \t("<<OBSTX2<<","<<OBSTY2<<","<<OBSTZ2<<")"<<endl; output<<"Obst2 radius: \t"<<OBSTR2<<endl; output<<"RE: \t"<<RE<<endl; output<<"UMAX: \t"<<UMAX<<endl; output<<"omega \t: "<<omega<<endl; output<<"TMAX: \t"<<TMAX<<endl; output<<"STARTF: \t"<<STARTF<<endl; output<<"START_VELAV: \t"<<START_VELAV<<endl; output<<"START_VELFLUC: \t"<<START_VELFLUC<<endl; output<<"REFINEMENT: \t"<<REFINEMENT<<endl; output<<"MODEL: \t"<<MODEL<<endl; output<<"Smagorinsky LES: \t"<<SmagLES<<endl; output<<"CS: \t"<<CS<<endl; output<<"LR domain size \t"<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl; output<<"LR factor \t"<<LRFACTOR<<endl; output<<"LR location \t"<<LRX0<<"x"<<LRY0<<"x"<<LRZ0<<endl; output<<"LR blocksize: \t"<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl; output<<"omega in LR \t: "<<omegaLR<<endl; output<<"GPUs per node \t: "<<GPU_per_node<<endl; } int main(int argc, char *argv[]) { int GPU_N; hipGetDeviceCount(&GPU_N); GPU_N=NUMGPU; cout<<"number of GPUs: "<<GPU_N<<endl; ofstream output; ofstream outputForce; ofstream outputInputs; string FileName = CASENAME; output.open ((FileName+".dat").c_str()); outputForce.open ((FileName+".force").c_str()); outputInputs.open ((FileName+".inputs").c_str()); //size_t memsize, memsize2; size_t pitch = 2; while(pitch<XDIM) pitch=pitch*2; pitch *= sizeof(float);//pitch*sizeof(float); size_t pitch_e = pitch/sizeof(float); cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl; 
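// pitch is XDIM rounded up to the next power of two (then converted to bytes), presumably so each
// row of the pitched allocations is aligned; e.g. XDIM=32 gives pitch_e=32, XDIM=96 would give 128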
float CharLength = OBSTR1*2.f; float omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f); float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); if(LRFACTOR == 0.25f){ omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f)); } if(LRFACTOR == 0.125f){ omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f)); omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f)); } float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR); float SF_fc = 1.f/SF_cf; cout<<SF_cf<<endl; WriteInputs(outputInputs,omega,omegaLR,GPU_N); WriteInputs(cout,omega,omegaLR,GPU_N); if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f && REFINEMENT == 1){ cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl; return 0; } int zInner = ZDIM/GPU_N-2; //excluding halo int ForceTime = max(0,TMAX-STARTF); dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ); //2 halo layers per GPU (for 2 GPUs) dim3 grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ); dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1); hipStream_t stream_halo[GPU_N]; hipStream_t stream_inner[GPU_N]; //data pointers as 3D array (GPUxCoord) float *f_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N]; float *f_d[GPU_N][2], *g_d[GPU_N][2], *h_d[GPU_N][2]; float *g_temp[GPU_N], *h_temp[GPU_N]; float *F_h[GPU_N][3]; float *F_d[GPU_N][3]; float *F_total[3]; float *velAv_h[GPU_N][3],*velFluc_h[GPU_N][3]; float *velAv_d[GPU_N][3],*velFluc_d[GPU_N][3]; for(int i = 0; i<3; i++) F_total[i] = (float *)malloc(ForceTime*sizeof(float)); for(int i=0;i<3;i++) for(int j=0;j<(ForceTime);j++) F_total[i][j] = 0; //Malloc and Initialize for each GPU for(int n = 0; n<GPU_N; n++){ f_h [n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float)); g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float)); h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float)); for(int i = 0; i<3; i++){ F_h [n][i] = (float *)malloc(ForceTime*sizeof(float)); velAv_h [n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float)); velFluc_h[n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float)); } hipSetDevice(n); hipStreamCreate(&stream_halo[n]); hipStreamCreate(&stream_inner[n]); for(int m = 0; m<GPU_N; m++) if(m != n) hipDeviceEnablePeerAccess(m,0); for(int i = 0; i<2; i++){ hipMalloc((void **) &f_d[n][i], pitch_e*YDIM*zInner*19*sizeof(float)); hipMalloc((void **) &g_d[n][i], pitch_e*YDIM* 19*sizeof(float)); hipMalloc((void **) &h_d[n][i], pitch_e*YDIM* 19*sizeof(float)); } hipMalloc((void **) & g_temp[n], pitch_e*YDIM* 19*sizeof(float)); hipMalloc((void **) & h_temp[n], pitch_e*YDIM* 19*sizeof(float)); for(int i = 0; i<3; i++){ hipMalloc((void **) & F_d [n][i], (ForceTime)*sizeof(float)); hipMalloc((void **) & velAv_d [n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float)); hipMalloc((void **) & velFluc_d[n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float)); } //initialize host f_inner for (int i = 0; i < XDIM*YDIM*zInner*19; i++) f_h[n][i] = 0; //initialize host g,h for (int i = 0; i < XDIM*YDIM*19; i++){ g_h[n][i] = 0; h_h[n][i] = 0; } for(int i=0;i<3;i++){ for(int j=0;j<(ForceTime);j++) F_h[n][i][j] = 0; for (int j = 0; j < XDIM*YDIM*ZDIM/GPU_N; j++){ velAv_h [n][i][j] = 0; velFluc_h[n][i][j] = 0; } } for(int i = 0; i<2; i++){ hipMemcpy2D(f_d[n][i],pitch,f_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice); hipMemcpy2D(g_d[n][i],pitch,g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,hipMemcpyHostToDevice); hipMemcpy2D(h_d[n][i],pitch,h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,hipMemcpyHostToDevice); } for(int i = 0; i<3; i++){ hipMemcpy2D(velAv_d 
[n][i],pitch,velAv_h [n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice); hipMemcpy2D(velFluc_d[n][i],pitch,velFluc_h[n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice); hipMemcpy(F_d[n][i],F_h[n][i],sizeof(float)*(ForceTime),hipMemcpyHostToDevice); } //initialization kernels for(int i = 0; i<2; i++){ hipLaunchKernelGGL(( initialize), dim3(grid),dim3(threads), 0, 0, f_d[n][i],pitch_e,zInner,GPU_N); hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, g_d[n][i],pitch_e, 1,GPU_N); hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, h_d[n][i],pitch_e, 1,GPU_N); } hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, g_temp[n],pitch_e, 1,GPU_N); hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, h_temp[n],pitch_e, 1,GPU_N); }//end Malloc and Initialize //data pointers as 3D array (GPUxCoord) float *f_LR_h[GPU_N], *g_LR_h[GPU_N], *h_LR_h[GPU_N]; float *f_LR_d[GPU_N][2], *g_LR_d[GPU_N][2], *h_LR_d[GPU_N][2]; float *g_LR_temp[GPU_N], *h_LR_temp[GPU_N]; float *velAv_LR_h[GPU_N][3],*velFluc_LR_h[GPU_N][3]; float *velAv_LR_d[GPU_N][3],*velFluc_LR_d[GPU_N][3]; float *f_interp[GPU_N], *g_interp[GPU_N], *h_interp[GPU_N], *g_interp_temp[GPU_N], *h_interp_temp[GPU_N]; float *interp_h[GPU_N]; size_t pitchLR = 2; while(pitchLR<XLRDIM) pitchLR=pitchLR*2; pitchLR = pitchLR*sizeof(float); size_t pitchLR_e = pitchLR/sizeof(float); cout<<"LR Pitch (in elements): "<<pitchLR_e<<endl; size_t pitchInterp = 2; while(pitchInterp<XLRDIM*LRFACTOR+1) pitchInterp=pitchInterp*2; pitchInterp = pitchInterp*sizeof(float); size_t pitchInterp_e = pitchInterp/sizeof(float); cout<<"Interp Pitch (in elements): "<<pitchInterp_e<<endl; int zLRInner = ZLRDIM/GPU_N-2; dim3 LR_threads(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ); dim3 LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),(zLRInner)/BLOCKSIZELRZ); dim3 g_LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),1); dim3 Interp_threads(BLOCKSIZEINTERP, LRLEVEL, LRLEVEL); dim3 Interp_grid(((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP),((YLRDIM+LRLEVEL-1)/LRLEVEL),ZLRDIM/LRLEVEL/GPU_N); cout<<((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP)<<", "<<((YLRDIM+LRLEVEL-1)/LRLEVEL)<<", "<<ZLRDIM/LRLEVEL/GPU_N<<endl; dim3 Interp_grid_c(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(ZDIM/GPU_N)/BLOCKSIZEZ); //setup LR if(REFINEMENT == 1){ for(int n = 0; n<GPU_N; n++){ f_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM*zLRInner*19*sizeof(float)); g_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float)); h_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float)); interp_h [n] = (float *)malloc((XLRDIM*LRFACTOR+1)*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float)); for(int i = 0; i<3; i++){ velAv_LR_h [n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); velFluc_LR_h[n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); } hipSetDevice(n); for(int i = 0; i<2; i++){ hipMalloc((void **) &f_LR_d[n][i], pitchLR_e*YLRDIM*zLRInner*19*sizeof(float)); hipMalloc((void **) &g_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float)); hipMalloc((void **) &h_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float)); } hipMalloc((void **) & g_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float)); hipMalloc((void **) & h_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float)); hipMalloc((void **) & f_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float)); hipMalloc((void **) & g_interp[n], 
pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); hipMalloc((void **) & h_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); hipMalloc((void **) & g_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); hipMalloc((void **) & h_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); for(int i = 0; i<3; i++){ hipMalloc((void **) & velAv_LR_d [n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); hipMalloc((void **) & velFluc_LR_d[n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); } for (int i = 0; i < XLRDIM*YLRDIM*zLRInner*19; i++) f_LR_h[n][i] = 0; //initialize host g,h for (int i = 0; i < XLRDIM*YLRDIM*19; i++){ g_LR_h[n][i] = 0; h_LR_h[n][i] = 0; } for(int i=0;i<3;i++){ for (int j = 0; j < XLRDIM*YLRDIM*ZLRDIM/GPU_N; j++){ velAv_LR_h [n][i][j] = 0; velFluc_LR_h[n][i][j] = 0; } } for(int i = 0; i<2; i++){ hipMemcpy2D(f_LR_d[n][i],pitchLR,f_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyHostToDevice); hipMemcpy2D(g_LR_d[n][i],pitchLR,g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyHostToDevice); hipMemcpy2D(h_LR_d[n][i],pitchLR,h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyHostToDevice); } for(int i = 0; i<3; i++){ hipMemcpy2D(velAv_LR_d [n][i],pitchLR,velAv_LR_h [n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice); hipMemcpy2D(velFluc_LR_d[n][i],pitchLR,velFluc_LR_h[n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice); } //initialization kernels for(int i = 0; i<2; i++){ hipLaunchKernelGGL(( initializeLR), dim3(LR_grid),dim3(LR_threads), 0, 0, f_LR_d[n][i],pitchLR_e,zLRInner,GPU_N); hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, g_LR_d[n][i],pitchLR_e, 1,GPU_N); hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, h_LR_d[n][i],pitchLR_e, 1,GPU_N); } hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, g_LR_temp[n],pitchLR_e, 1,GPU_N); hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, h_LR_temp[n],pitchLR_e, 1,GPU_N); }//end of GPU loop for malloc and initialize for LR }//end of LR malloc and initialize hipFuncSetCacheConfig(InterpCF,hipFuncCachePreferShared); int A = 0; int B = 1; int C = 0; int D = 1; for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); size_t mem_avail, mem_total; hipMemGetInfo(&mem_avail,&mem_total); cout<<"Device memory used for dev"<<n<<" : "<<(mem_total-mem_avail)*pow(10,-9)<<" GB\n"; cout<<"Device memory available for dev"<<n<<" : "<<(mem_avail)*pow(10,-9)<<" GB\n"; } struct timeval tdr0,tdr1; double restime; hipDeviceSynchronize(); gettimeofday (&tdr0,NULL); //time loop for(int t = 0; t<TMAX; t++) { //copy temporary array for top and bottom on coarse mesh to neighbor GPU. 
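// only the five distributions that cross the inter-GPU plane are exchanged: f14-f18 (the -z
// directions) come from the bottom plane of the GPU above into h_temp, and f9-f13 (the +z
// directions) come from the top plane of the GPU below into g_temp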
Only transfering 5 distbs for(int n = 0; n<GPU_N; n++) hipMemcpyPeerAsync(&h_temp[n][pitch_e*YDIM*14],n,&g_d[ (n+1)%GPU_N][A][pitch_e*YDIM*14], (n+1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]); for(int n = 0; n<GPU_N; n++) hipMemcpyPeerAsync(&g_temp[n][pitch_e*YDIM*9],n,&h_d[abs(n-1)%GPU_N][A][pitch_e*YDIM*9],abs(n-1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]); //compute inner nodes on coarse mesh for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipLaunchKernelGGL(( update_inn), dim3(grid),dim3(threads),0,stream_inner[n], f_d[n][B],f_d[n][A],g_d[n][A], h_d[n][A],omega,pitch_e,n,zInner,velAv_d[n][0],velAv_d[n][1],velFluc_d[n][0],velFluc_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),f_interp[n],pitchInterp_e); } //synchronize halo stream before computing top and bottom nodes for(int n = 0; n<GPU_N; n++) hipStreamSynchronize(stream_halo[n]); //compute top and bottom nodes for(int n = 0; n<GPU_N; n++) { hipSetDevice(n); hipLaunchKernelGGL(( update_top), dim3(g_grid), dim3(threads), 0, stream_halo [n], h_d[n][B],h_d[n][A],f_d[n][A],h_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),h_interp[n],pitchInterp_e); hipLaunchKernelGGL(( update_bot), dim3(g_grid), dim3(threads), 0, stream_halo [n], g_d[n][B],g_d[n][A],f_d[n][A],g_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),g_interp[n],pitchInterp_e); } //hipDeviceSynchronize(); swap(A,B); if(REFINEMENT == 1){ int flag_F = 0; for(int i = 0; i<LRLEVEL; i++){ if(t>STARTF && i == 0) flag_F = 1; else flag_F = 0; for(int n = 0; n<GPU_N; n++){ hipMemcpyPeerAsync(&h_LR_temp[n][pitchLR_e*YLRDIM*14],n,&g_LR_d[ (n+1)%GPU_N][C][pitchLR_e*YLRDIM*14], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]); hipMemcpyPeerAsync(&g_LR_temp[n][pitchLR_e*YLRDIM*9 ],n,&h_LR_d[abs(n-1)%GPU_N][C][pitchLR_e*YLRDIM*9 ],abs(n-1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]); } for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipLaunchKernelGGL(( update_inn_LR), dim3(LR_grid),dim3(LR_threads),0,stream_inner[n], f_LR_d[n][D],f_LR_d[n][C],g_LR_d[n][C], h_LR_d[n][C],omegaLR,pitchLR_e,n,zLRInner,velAv_LR_d[n][0],velAv_LR_d[n][1],velFluc_LR_d[n][0],velFluc_LR_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F); } for(int n = 0; n<GPU_N; n++) hipStreamSynchronize(stream_halo[n]); for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipLaunchKernelGGL(( update_top_LR), dim3(g_LR_grid),dim3(LR_threads),0,stream_halo[n], h_LR_d[n][D],h_LR_d[n][C],f_LR_d[n][C],h_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F); hipLaunchKernelGGL(( update_bot_LR), dim3(g_LR_grid),dim3(LR_threads),0,stream_halo[n], g_LR_d[n][D],g_LR_d[n][C],f_LR_d[n][C],g_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F); } if(i == LRLEVEL-1) { for(int n = 0; n<GPU_N; n++) //hipMemcpyPeerAsync(&h_interp_temp[n][0],n,&g_interp[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]); for(int n = 0; n<GPU_N; n++) hipMemcpyPeerAsync(&g_interp_temp[n][0],n,&h_interp[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]); } for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipDeviceSynchronize(); } flag_F = 0; swap(C,D); } //interp from coarse grid for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipLaunchKernelGGL(( InterpCF), dim3(Interp_grid),dim3(Interp_threads),0,stream_inner[n], 
f_LR_d[n][C],g_LR_d[n][C],h_LR_d[n][C],pitchLR_e,f_interp[n],g_interp[n],h_interp[n],g_interp_temp[n],pitchInterp_e,SF_cf,omega,n,zInner,zLRInner); //hipDeviceSynchronize(); } //interp from fine grid for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipMemcpyPeerAsync(&h_LR_temp[n][0],n,&g_LR_d[ (n+1)%GPU_N][C][0], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*19,stream_halo[n]); } for(int n = 0; n<GPU_N; n++) hipStreamSynchronize(stream_halo[n]); for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipLaunchKernelGGL(( InterpFC), dim3(Interp_grid_c),dim3(threads),0,stream_halo[n], f_d[n][A],g_d[n][A],h_d[n][A],f_LR_d[n][C],h_LR_d[n][C],h_LR_temp[n],pitch_e,pitchLR_e,SF_fc,omegaLR,n,zInner,zLRInner); } }//end refinement for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipDeviceSynchronize(); } }//end time loop hipDeviceSynchronize(); gettimeofday (&tdr1,NULL); timeval_subtract (&restime, &tdr1, &tdr0); int Nodes; Nodes = XDIM*YDIM*ZDIM; if (REFINEMENT == 1) Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL; cout<<"Time taken for main kernel: "<<restime<<" (" <<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n"; //D2H Memcpy and write results for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipMemcpy2D(f_h[n],XDIM*sizeof(float),f_d[n][A],pitch,XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyDeviceToHost); hipMemcpy2D(g_h[n],XDIM*sizeof(float),g_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,hipMemcpyDeviceToHost); hipMemcpy2D(h_h[n],XDIM*sizeof(float),h_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,hipMemcpyDeviceToHost); for(int i = 0; i<3; i++){ hipMemcpy2D( velAv_h[n][i],XDIM*sizeof(float),velAv_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost); hipMemcpy2D(velFluc_h[n][i],XDIM*sizeof(float),velFluc_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost); hipMemcpy(F_h[n][i],F_d[n][i],sizeof(float)*ForceTime,hipMemcpyDeviceToHost); } WriteResults(output,f_h[n],g_h[n],h_h[n],velAv_h[n],velFluc_h[n],omega,GPU_N,n); output<<endl; for(int i=0;i<3;i++) for(int j=0;j<ForceTime;j++) F_total[i][j] += F_h[n][i][j]; for(int i = 0; i<2; i++){ hipFree(f_d[n][i]); hipFree(g_d[n][i]); hipFree(h_d[n][i]); } hipFree(f_d[n]); hipFree(g_d[n]); hipFree(h_d[n]); hipFree(g_temp[n]); hipFree(h_temp[n]); for(int i=0;i<3;i++) hipFree(F_d[n][i]); hipFree(F_d[n]); }//end Memcpy and write results WriteForces(F_total,outputForce,ForceTime,REFINEMENT*LRLEVEL); if(REFINEMENT == 1){ // output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n"; // output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM<<"\n"; for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipMemcpy2D(f_LR_h[n],XLRDIM*sizeof(float),f_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyDeviceToHost); hipMemcpy2D(g_LR_h[n],XLRDIM*sizeof(float),g_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyDeviceToHost); hipMemcpy2D(h_LR_h[n],XLRDIM*sizeof(float),h_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyDeviceToHost); //hipMemcpy2D(interp_h[n],(XLRDIM*LRFACTOR+1)*sizeof(float),f_interp[n],pitchInterp,(XLRDIM*LRFACTOR+1)*sizeof(float),(YLRDIM*LRFACTOR+1)*zInner*9,hipMemcpyDeviceToHost); for(int i = 0; i<3; i++){ hipMemcpy2D( velAv_LR_h[n][i],XLRDIM*sizeof(float),velAv_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost); hipMemcpy2D(velFluc_LR_h[n][i],XLRDIM*sizeof(float),velFluc_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost); } 
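// write this GPU's refined-region results, then release its LR device buffers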
WriteResultsLR(output,f_LR_h[n],g_LR_h[n],h_LR_h[n],velAv_LR_h[n],velFluc_LR_h[n],omegaLR,GPU_N,n); output<<endl; for(int i = 0; i<2; i++){ hipFree(f_LR_d[n][i]); hipFree(g_LR_d[n][i]); hipFree(h_LR_d[n][i]); } hipFree(f_LR_d[n]); hipFree(g_LR_d[n]); hipFree(h_LR_d[n]); hipFree(g_LR_temp[n]); hipFree(h_LR_temp[n]); } } return 0; }
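A minimal CUDA sketch of the per-timestep multi-GPU pattern used in the main loop above: each device ring-exchanges its halo slab with cudaMemcpyPeerAsync on a dedicated halo stream while the interior kernel runs on a separate inner stream, and the halo stream is synchronized before the boundary-slice kernels launch. The names interiorKernel, boundaryKernel, haloBytes and NGPU are placeholders, peer access is assumed to have been enabled during setup, and only the upward exchange is shown; the real loop also exchanges in the downward direction and copies just the 5 distributions that cross each interface.

#include <cuda_runtime.h>

__global__ void interiorKernel(float* f) { /* update interior z-slices (placeholder) */ }
__global__ void boundaryKernel(float* f, const float* halo) { /* update top/bottom slices using the halo (placeholder) */ }

void stepAllGPUs(float** f_d, float** halo_d, size_t haloBytes, int NGPU,
                 cudaStream_t* stream_halo, cudaStream_t* stream_inner,
                 dim3 grid, dim3 threads, dim3 g_grid)
{
    //start peer-to-peer halo copies from the neighboring device (ring topology)
    for(int n = 0; n<NGPU; n++){
        int up = (n+1)%NGPU; //device holding the slab above device n
        cudaSetDevice(n);
        cudaMemcpyPeerAsync(halo_d[n],n,f_d[up],up,haloBytes,stream_halo[n]);
    }
    //interior nodes do not touch the halo, so this work overlaps with the copies
    for(int n = 0; n<NGPU; n++){
        cudaSetDevice(n);
        interiorKernel<<<grid,threads,0,stream_inner[n]>>>(f_d[n]);
    }
    //wait for the halo data before updating the slices that depend on it
    for(int n = 0; n<NGPU; n++)
        cudaStreamSynchronize(stream_halo[n]);
    for(int n = 0; n<NGPU; n++){
        cudaSetDevice(n);
        boundaryKernel<<<g_grid,threads,0,stream_halo[n]>>>(f_d[n],halo_d[n]);
    }
}

Splitting the update into an interior pass and a boundary pass is what lets the peer-to-peer traffic hide behind useful work: only the top and bottom z-slices of each device's slab depend on the neighbor's data, so everything else can proceed while the copies are in flight.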
d15730842d65b81d14520b7153fdbb0638413d20.cu
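//grid refinement parameters: LRLEVEL is the coarse-to-fine ratio and LRFACTOR = 1/LRLEVEL is the fine-grid
//spacing in coarse lattice units; (LRX0,LRY0,LRZ0) is the origin of the refined patch and XLRDIM/YLRDIM/ZLRDIM
//are its node counts, so initializeLR maps fine node x to coordinate LRX0+x*LRFACTOR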
#include <cuda.h> #include <iostream> #include <ostream> #include <fstream> #include <sys/time.h> #include <time.h> using namespace std; #define CASENAME "test" #define NUMGPU 1 #define BLOCKSIZEX 32 #define BLOCKSIZEY 1 #define BLOCKSIZEZ 1 #define BLOCKSIZELRX 64 #define BLOCKSIZELRY 1 #define BLOCKSIZELRZ 1 #define BLOCKSIZEINTERP 8 #define XDIM 32 #define YDIM 32 #define ZDIM 32 #define TMAX 5000 #define STARTF 0 #define OBSTR1 16.f #define OBSTX1 96.5f #define OBSTY1 127.5f #define OBSTZ1 15.5f #define OBSTR2 16.f #define OBSTX2 224.5f #define OBSTY2 127.5f #define OBSTZ2 31.5f #define LRFACTOR 0.5f #define LRLEVEL 2 #define LRX0 80.25f //minimum x coord of LR #define XLRDIM 128 //number of nodes in x #define LRY0 48.25f #define YLRDIM 128 #define LRZ0 -0.75f #define ZLRDIM 64 #define ORDER 2 //order of accuracy of interpolation //#define LRFACTOR 0.25f //#define LRLEVEL 4 //#define LRX0 80.125f //minimum x coord of LR //#define XLRDIM 256 //number of nodes in x //#define LRY0 48.125f //#define YLRDIM 256 //#define LRZ0 -0.875f //#define ZLRDIM 128 //#define ORDER 2 //order of accuracy of interpolation #define RE 100.f //3000.f//2000.f//100.f; #define UMAX 0.06f #define SmagLES 0 //1,0 #define MODEL "MRT" //BGK,MRT,STREAM #define REFINEMENT 0 //1,0 #define CS 0.02f #define VELAV 1 #define START_VELAV 100000 #define START_VELFLUC 250000 inline __device__ int ImageFcnLR(float x, float y, float z) { int value = 0; // if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1) // value = 10; // else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2) // value = 10; if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1) { value = 10; } // if(z < 0.5f) // value = 1; // if(z > ZDIM-1-0.5f) // value = 1; return value; } inline __device__ int ImageFcn(int x, int y, int z) { int value = 0; //Cylinder // if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1) // value = 1; // else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2) // value = 10; // if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1) // value = 10; //Lid Driven Cavity if(y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1 || x == XDIM-1) value = 1; // else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2) // return 1; else if(x == 0) return 400; // if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1) // value = 10; // if(z == 1) // value = 1; // if(z == ZDIM-2) // value = 1; // if(y == 0) // value = 1;//200;//22; // else if(y == YDIM-1) // value = 1;//100; // else if(x == 0) // value = 400;//26; // else if(x == XDIM-1) // //else if(x > 42) // value = 300;//25; // else if(z == 0) // value = 1; // //else if(z == ZDIM-1 || z == ZDIM-2) // else if(z > 41) // value = 1; return value; } inline __device__ float PoisProf (float x){ float radius = (YDIM-1-1)*0.5f; float result = -1.5f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f); return (result); } inline __device__ float PoisProf3D (float x, float y){ x = x-0.5f; y = y-0.5f; float H = 41.f; return UMAX;//2.25f*16.f*UMAX*x*y*(H-x)*(H-y)/((H)*(H)*(H)*(H)); // float radius = (YDIM-1-1)*0.5f; // float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f); // return (result); } int timeval_subtract (double *result, struct timeval *x, struct timeval *y) { struct timeval result0; /* Perform the carry for the later subtraction by updating y. 
*/ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (y->tv_usec - x->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. tv_usec is certainly positive. */ result0.tv_sec = x->tv_sec - y->tv_sec; result0.tv_usec = x->tv_usec - y->tv_usec; *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } __device__ int dmin(int a, int b) { if (a<b) return a; else return b-1; } __device__ int dmax(int a) { if (a>-1) return a; else return 0; } __device__ int dmax(int a,int b) { if (a>b) return a; else return b; } __device__ int dmin_p(int a, int b) { if (a<b) return a; else return 0; } __device__ int dmax_p(int a, int b) { if (a>-1) return a; else return b-1; } inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011, float v100, float v101, float v110, float v111, float x, float y, float z){ return v000*(1.f-x)*(1.f-y)*(1.f-z)+ v001*( x)*(1.f-y)*(1.f-z)+ v010*(1.f-x)*( y)*(1.f-z)+ v011*( x)*( y)*(1.f-z)+ v100*(1.f-x)*(1.f-y)*( z)+ v101*( x)*(1.f-y)*( z)+ v110*(1.f-x)*( y)*( z)+ v111*( x)*( y)*( z); } inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner) { int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner); index = dmax(index); index = dmin(index,19*pitch*YDIM*(zInner)); return index; } inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner) { int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner); index = dmax(index); index = dmin(index,19*pitch*YLRDIM*(zInner)); return index; } inline __device__ int f_mem_interp(int m_num, int x, int y, int z, int pitch, int zInner) { int index = (x+y*pitch+z*(YLRDIM*LRFACTOR+1)*pitch)+m_num*pitch*(YLRDIM*LRFACTOR+1)*(zInner); index = dmax(index); index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1)*(zInner)); return index; } inline __device__ int buff_mem_interp(int m_num, int x, int y, int pitch, int zInner) { int index = (x+y*pitch+m_num*(YLRDIM*LRFACTOR+1)*pitch); index = dmax(index); index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1)); return index; } inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch) { int index = (x+y*pitch)+f_num*pitch*YDIM; index = dmax(index); index = dmin(index,19*pitch*YDIM); return index; } inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch) { int index = (x+y*pitch)+f_num*pitch*YLRDIM; index = dmax(index); index = dmin(index,19*pitch*YLRDIM); return index; } inline __device__ void Moments(float* f, float* m) { m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17]; m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ; m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18]; m[7 ] = 
f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; } void Moments_host(float* f, float* m) { m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17]; m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ; m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18]; m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; } void InvertMoments_host(float* f, float* m) { float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + 
-0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } inline __device__ void mrt_meq(float* meq, float rho, float u, float v, float w) { meq[ 0] = rho; meq[ 1] = -11.f*rho+19.f*(u*u+v*v+w*w); meq[ 2] = 7.53968254f*(u*u+v*v+w*w);; meq[ 3] = u; meq[ 4] = -0.666666667f*u; meq[ 5] = v; meq[ 6] = -0.666666667f*v; meq[ 7] = w; meq[ 8] = -0.666666667f*w; meq[ 9] = 2.f*u*u-(v*v+w*w); meq[11] = v*v-w*w; meq[13] = u*v; meq[14] = v*w; meq[15] = u*w; } //outputs physical moments (rho,u,v,w,Pxx,Pww,Pxy,Pyz,Pxz) from f inline 
__device__ void PhysicalMoments(float* mom, float* f) { mom[0] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; mom[1] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17]; mom[2] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; mom[3] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; mom[4] = 2.f*f[1]+-f[2]+2.f*f[3]+-f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+-f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18]; mom[5] = f[2]+f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+-f[10]+-f[12]+-f[14]+-f[15]+-f[17]; mom[6] = f[5]+-f[6]+f[7]+-f[8]; mom[7] = f[11]+-f[13]+-f[16]+f[18]; mom[8] = f[10]+-f[12]+-f[15]+f[17]; } inline __device__ void InvertMoments(float* f, float* m) { float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); 
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } inline __device__ void InvertPhysicalMoments(float* f, float* mom, float SF) { float m[19]={0}; m[ 0] = mom[0]; m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3])); m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]); m[ 3] = mom[1]; m[ 4] = -0.666666667f*mom[1]; m[ 5] = mom[2]; m[ 6] = -0.666666667f*mom[2]; m[ 7] = mom[3]; m[ 8] = -0.666666667f*mom[3]; m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3])); m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]); m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2]; m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3]; m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3]; // InvertMoments(f,m); float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); 
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } //outputs strain rate tensor (Sxx,Syy,Szz,Sxy,Syz,Sxz) from inputs (m0,m3,m5,m7,m9,m11,m13,m14,m15) inline __device__ void StrainRate(float* S, float* m_strain, float omega) { omega = 1.f; float m1 = (-11.f*m_strain[0]+19.f*(m_strain[1]*m_strain[1]+m_strain[2]*m_strain[2]+m_strain[3]*m_strain[3])); float m9 = m_strain[4]; float m11= m_strain[5]; float m13= m_strain[6]; float m14= m_strain[7]; float m15= m_strain[8]; S[0] = -0.026315789f*( m1+19.f*omega* m9); S[1] = -0.013157895f*(2.f*m1-19.f*omega*(m9-3.f*m11)); S[2] = -0.013157895f*(2.f*m1-19.f*omega*(m9+3.f*m11)); S[3] = -1.5f*omega*m13; S[4] = -1.5f*omega*m14; S[5] = -1.5f*omega*m15; } inline __device__ void mrt_collide(float* f, float omega) { float m[19]; //float u,v,w; m[3] = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; m[5] = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; m[7] = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[0] = f[ 0]+f[ 1]+f[ 2]+f[ 3]+f[ 4]+f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[ 9]+ f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[ 1] = 19.f*(-f[ 0]+ f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18] -(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]));//+8.f*(f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18]); m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18] -7.53968254f*(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]); m[ 4] = 1.666666667f*(-3.f*f[1]+3.f*f[ 3]+m[3]); m[ 6] = 1.666666667f*(-3.f*f[2]+3.f*f[ 4]+m[5]); m[ 8] = 1.666666667f*(-3.f*f[9]+3.f*f[14]+m[7]); m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+- f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18] -(2.f*m[3]*m[3]-(m[5]*m[5]+m[7]*m[7])); m[10] =-4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ 
f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+-f[10] +-f[12] +- f[14]+-f[15] +-f[17] -(m[5]*m[5]-m[7]*m[7]); m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+-f[10] +-f[12] + 2.f*f[14]+-f[15] +-f[17] ; m[13] = f[ 5]+-f[ 6]+ f[ 7]+-f[ 8] -m[3]*m[5]; m[14] = f[11] +- f[13] + - f[16] + f[18] -m[5]*m[7]; m[15] = f[10] + - f[12] +-f[15] + f[17] -m[3]*m[7]; m[16] = f[ 5]+-f[ 6]+-f[ 7]+ f[ 8] -f[10] + f[12] +-f[15] + f[17] ; m[17] = -f[ 5]+-f[ 6]+ f[ 7]+ f[ 8] + f[11] +- f[13] + f[16] +- f[18]; m[18] = f[10]+- f[11]+ f[12]+- f[13] +-f[15]+ f[16]+-f[17]+ f[18]; if(SmagLES == 1) { // float Pxx = 0.33333333f*(m[1]+2.f*m[0]+m[9]); // float Pyy = Pxx+0.5f*(m[11]-m[9]);//0.3333333f*(m[1]+2.f*m[0]+0.5f*(3.f*m[11]-m[9])); // float Pzz = Pyy-m[11]; // float Q11 = 0.33333333f*(m[0])+m[3]*m[3]-Pxx; // float Q22 = 0.33333333f*(m[0])+m[5]*m[5]-Pyy; // float Q33 = 0.33333333f*(m[0])+m[7]*m[7]-Pzz; // float Q12 = m[3]*m[5]-m[13]; // float Q23 = m[5]*m[7]-m[14]; // float Q13 = m[3]*m[7]-m[15]; //// float Q11 = 0.33333333f*m[0]+m[3]*m[3]-Pxx; //// float Q22 = 0.33333333f*m[0]+m[5]*m[5]-Pyy; //// float Q33 = 0.33333333f*m[0]+m[7]*m[7]-Pzz; //// float Q12 = 0.33333333f*m[0]+m[3]*m[5]-m[13]; //// float Q23 = 0.33333333f*m[0]+m[5]*m[7]-m[14]; //// float Q13 = 0.33333333f*m[0]+m[3]*m[7]-m[15]; float usqr = m[3]*m[3]+m[5]*m[5]+m[7]*m[7]; float u = m[3]; float v = m[5]; float w = m[7]; float rho = m[0]; float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w 
; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; float PI11 = (f[1 ]-feq1 )+(f[3 ]-feq3 )+(f[5 ]-feq5 )+(f[6 ]-feq6 )+(f[7 ]-feq7 )+(f[8 ]-feq8 )+(f[10]-feq10)+(f[12]-feq12)+(f[15]-feq15)+(f[17]-feq17); float PI22 = (f[2 ]-feq2 )+(f[4 ]-feq4 )+(f[5 ]-feq5 )+(f[6 ]-feq6 )+(f[7 ]-feq7 )+(f[8 ]-feq8 )+(f[11]-feq11)+(f[13]-feq13)+(f[16]-feq16)+(f[18]-feq18); float PI33 = (f[9 ]-feq9 )+(f[14]-feq14)+(f[10]-feq10)+(f[12]-feq12)+(f[15]-feq15)+(f[17]-feq17)+(f[11]-feq11)+(f[13]-feq13)+(f[16]-feq16)+(f[18]-feq18); float PI12 = (f[5 ]-feq5 )+(f[7 ]-feq7 )-(f[6 ]-feq6 )-(f[8 ]-feq8 ); float PI13 = (f[10]-feq10)+(f[17]-feq17)-(f[12]-feq12)-(f[15]-feq15); float PI23 = (f[11]-feq11)+(f[18]-feq18)-(f[13]-feq13)-(f[16]-feq16); float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); //float Q = sqrt(Q11*Q11+Q22*Q22+Q33*Q33+2.f*Q12*Q12+2.f*Q23*Q23+2.f*Q13*Q13); float tau0 = 1.f/omega; float tau = 0.5f*tau0+0.5f*sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q); omega = 1.f/tau; } f[ 0] -=- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]); f[ 1] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]); f[ 2] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]); f[ 3] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]); f[ 4] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]); f[ 5] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))); f[ 6] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))); f[ 7] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))); f[ 8] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))); f[ 9] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]); f[10] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))); f[11] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))); f[12] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))); f[13] -= 
0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))); f[14] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]); f[15] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))); f[16] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))); f[17] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))); f[18] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))); } inline __device__ void North_Extrap(float* f, float rho) { float m[19]; //rho = 1.0f; float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 
2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } inline __device__ void South_Extrap(float* f, float v) { float m[19]; float u = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 
8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 
0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } inline __device__ void East_Extrap(float* f, float rho) { float m[19]; //rho = 0.0f; float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ 
- f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 
0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } inline __device__ void West_Extrap(float* f, float u, int t) { float m[19]; u = 0.f; float v = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float w = UMAX;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; if(t == 1000 || t == 2000 || t == 3000) w = 0.01f; float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] 
=(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } __device__ void xsymmetry_bot(float* f, int y, int z) { if(y == 0 && z == 0){ f[ 2] = f[ 4]; f[13]=f[18]; f[11]=f[18]; f[16]=f[18]; f[ 6] =f[ 7]; f[ 9] =f[14]; f[12]=f[17]; } else if(y == 0 && z == ZDIM-1){ f[ 4] = f[ 2]; f[11]=f[13]; f[18]=f[13]; f[16]=f[13]; f[ 6] =f[ 7]; f[14]=f[ 9]; f[17]=f[12]; } else if(y == YDIM-1 && z == 0){ f[ 4] = f[ 2]; f[11]=f[16]; f[18]=f[16]; f[13]=f[16]; f[ 7] =f[ 6]; f[ 9] =f[14]; f[12]=f[17]; } else if(y == YDIM-1 && z == ZDIM-1){ f[ 4] = f[ 2]; f[16]=f[11]; f[18]=f[11]; f[13]=f[11]; f[ 7] =f[ 6]; f[14]=f[ 9]; f[17]=f[12]; } else{ if(y == 0){ f[ 2] = f[ 4]; f[11]=f[13]; f[16]=f[18]; f[ 8] = f[ 5]; } else if(y == YDIM-1){ f[ 4]=f[ 2] ; f[13]=f[11]; f[18]=f[16]; f[ 5]=f[ 8] ; } } f[ 1] = f[ 3] ; f[ 5] = f[ 6] ; f[ 8] = f[ 7] ; f[10]= f[12]; f[15]= f[17]; } __device__ void xsymmetry_top(float* f, int y, int z) { if(y == 0 && z == 0){ f[ 2] = f[ 4]; f[13] = f[18]; f[11] = f[18]; f[16] = f[18]; f[ 5] = f[ 8]; f[ 9] = f[14]; f[10] = f[15]; } else if(y == 0 && z == ZDIM-1){ f[ 2] = f[ 4]; f[11] = f[13]; f[18] = f[13]; f[16] = f[13]; f[ 5] = f[ 8]; f[14] = f[ 9]; f[15] = f[10]; } else if(y == YDIM-1 && z == 0){ f[ 4] = f[ 2]; f[18] = f[16]; f[11] = f[16]; f[13] = f[16]; f[ 8] = f[ 5]; f[ 9] = f[14]; f[10] = f[15]; 
} else if(y == YDIM-1 && z == ZDIM-1){ f[ 4] = f[ 2]; f[13] = f[11]; f[16] = f[11]; f[18] = f[11]; f[ 8] = f[ 5]; f[14] = f[ 9]; f[15] = f[10]; } else{ if(y == 0){ f[ 2] = f[ 4]; f[11] = f[13]; f[16] = f[18]; f[ 5] = f[ 8]; } else if(y == YDIM-1){ f[ 4] = f[ 2]; f[13] = f[11]; f[18] = f[16]; f[ 8] = f[ 5]; } } f[ 3] = f[ 1] ; f[ 6] = f[ 5] ; f[ 7] = f[ 8] ; f[12]= f[10]; f[17]= f[15]; } inline __device__ void vel_av(float* f, float& uAv, float& vAv, int t) { float u,v;//,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; uAv = (uAv*(t-START_VELAV)+u)/((t-START_VELAV)+1); vAv = (vAv*(t-START_VELAV)+v)/((t-START_VELAV)+1); } inline __device__ void vel_avLR(float* f, float& uAv, float& vAv, float t) { float u,v;//,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; uAv = (uAv*(t-START_VELAV)+u*LRFACTOR)/((t-START_VELAV)+LRFACTOR); vAv = (vAv*(t-START_VELAV)+v*LRFACTOR)/((t-START_VELAV)+LRFACTOR); } inline __device__ void vel_fluc(float* f, float& uAv, float& vAv, float& ufluc, float& vfluc, int t) { float u,v;//,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; u = (u-uAv)*(u-uAv); v = (v-vAv)*(v-vAv); ufluc = (ufluc*(t-START_VELFLUC)+u)/((t-START_VELFLUC)+1); vfluc = (vfluc*(t-START_VELFLUC)+v)/((t-START_VELFLUC)+1); } inline __device__ void vel_flucLR(float* f, float& uAv, float& vAv, float& ufluc, float& vfluc, float t) { float u,v;//,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; u = (u-uAv)*(u-uAv); v = (v-vAv)*(v-vAv); ufluc = (ufluc*(t-START_VELFLUC)+u*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR); vfluc = (vfluc*(t-START_VELFLUC)+v*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR); } __global__ void initialize(float *fout, size_t pitch, int zInner, int GPU_N) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = x; float ycoord = y; float zcoord = z+1+GPU_N*ZDIM; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float f[19] = {0}; float m[19] = {0}; int im = ImageFcn(xcoord,ycoord,zcoord); float u,v,w,rho; rho = 1.f; u = UMAX; v = 0.01f; w = 0.01f; if(im == 10 || im == 1){ u = 0.0f; v = 0.0f; w = 0.0f; } mrt_meq(m,rho,u,v,w); InvertMoments(f,m); for(int i = 0; i<19; i++) fout[j+i *pitch*YDIM*zInner]=f[ i]; } __global__ void initializeLR(float *fout, size_t pitch, int zInner, int GPU_N) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = x; float ycoord = y; float zcoord = z+1+GPU_N*(zInner+2); xcoord = LRX0+x*LRFACTOR; ycoord = LRY0+y*LRFACTOR; zcoord = LRZ0+LRFACTOR*(GPU_N*(zInner+2)+z); int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f[19] = {0}; float m[19] = {0}; int im = ImageFcnLR(xcoord,ycoord,zcoord); float u,v,w,rho; rho = 1.f; u = UMAX; v = 0.0f; w = 0.0f; if(im == 10 || im == 1){ u = 0.0f; v = 0.0f; w = 0.0f; } mrt_meq(m,rho,u,v,w); InvertMoments(f,m); for(int i = 0; i<19; i++) fout[j+i *pitch*YLRDIM*zInner]=f[ i]; } __global__ void update_top(float* hB, float* hA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, 
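// h_interp / pitch_interp: pitched buffer receiving the physical moments of
// coarse nodes that lie inside the refined-patch footprint (filled only when
// REFINEMENT == 1, consumed by the coarse-to-fine interpolation step).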
float* h_interp, size_t pitch_interp) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int j = x+y*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1); float f[19]; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= hA [j]; f[1 ]= hA [buff_mem(1 ,x-1,y ,pitch)]; f[3 ]= hA [buff_mem(3 ,x+1,y ,pitch)]; f[2 ]= hA [buff_mem(2 ,x ,y-1,pitch)]; f[5 ]= hA [buff_mem(5 ,x-1,y-1,pitch)]; f[6 ]= hA [buff_mem(6 ,x+1,y-1,pitch)]; f[4 ]= hA [buff_mem(4 ,x ,y+1,pitch)]; f[7 ]= hA [buff_mem(7 ,x+1,y+1,pitch)]; f[8 ]= hA [buff_mem(8 ,x-1,y+1,pitch)]; f[9 ]= fA [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)]; f[10]= fA [f_mem (10,x-1,y ,zInner-1,pitch, zInner)]; f[11]= fA [f_mem (11,x ,y-1,zInner-1,pitch, zInner)]; f[12]= fA [f_mem (12,x+1,y ,zInner-1,pitch, zInner)]; f[13]= fA [f_mem (13,x ,y+1,zInner-1,pitch, zInner)]; f[14]= temp[buff_mem(14,x ,y ,pitch)]; f[15]= temp[buff_mem(15,x-1,y ,pitch)]; f[16]= temp[buff_mem(16,x ,y-1,pitch)]; f[17]= temp[buff_mem(17,x+1,y ,pitch)]; f[18]= temp[buff_mem(18,x ,y+1,pitch)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } hB[buff_mem(0 ,x,y,pitch)] = f[0 ]; hB[buff_mem(1 ,x,y,pitch)] = f[3 ]; hB[buff_mem(2 ,x,y,pitch)] = f[4 ]; hB[buff_mem(3 ,x,y,pitch)] = f[1 ]; hB[buff_mem(4 ,x,y,pitch)] = f[2 ]; hB[buff_mem(5 ,x,y,pitch)] = f[7 ]; hB[buff_mem(6 ,x,y,pitch)] = f[8 ]; hB[buff_mem(7 ,x,y,pitch)] = f[5 ]; hB[buff_mem(8 ,x,y,pitch)] = f[6 ]; hB[buff_mem(9 ,x,y,pitch)] = f[14]; hB[buff_mem(10,x,y,pitch)] = f[17]; hB[buff_mem(11,x,y,pitch)] = f[18]; hB[buff_mem(12,x,y,pitch)] = f[15]; hB[buff_mem(13,x,y,pitch)] = f[16]; hB[buff_mem(14,x,y,pitch)] = f[9 ]; hB[buff_mem(15,x,y,pitch)] = f[12]; hB[buff_mem(16,x,y,pitch)] = f[13]; hB[buff_mem(17,x,y,pitch)] = f[10]; hB[buff_mem(18,x,y,pitch)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; if(im == 100)//north outlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x,y-1,pitch)]; North_Extrap(f,1.0f); } if(im == 200)//south inlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x,y+1,pitch)]; //South_Extrap(f,UMAX); float u_in = PoisProf3D(x,(GPU+1)*(zInner+2)-1); South_Extrap(f,u_in); } if(im == 300)//east outlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x-1,y,pitch)]; East_Extrap(f,1.0f); } if(im == 400)//west inlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x+1,y,pitch)]; float u_in = PoisProf3D(y,(GPU+1)*(zInner+2)-1); West_Extrap(f,u_in,t); } if(im == 25) xsymmetry_top(f,y,(GPU+1)*(zInner+2)-1); if(im == 26) xsymmetry_bot(f,y,(GPU+1)*(zInner+2)-1); mrt_collide(f,omega); for(int i = 0; i<19; i++) hB[buff_mem(i ,x,y,pitch)] = f[i ]; } if(REFINEMENT == 1){ if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR)) { // if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1)) // { // //do nothing // } // else{ // 
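// Refinement coupling: coarse nodes covered by the LR patch export their
// physical moments via PhysicalMoments() into h_interp so that the fine grid
// can later be fed with interpolated boundary data.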
//float rho,u,v,w,m9,m11,m13,m14,m15; float mom[9]; PhysicalMoments(mom,f); for(int i = 0; i<9; i++) h_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i]; // } } } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_bot(float* gB, float* gA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* g_interp, size_t pitch_interp) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int j = x+y*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,GPU*(zInner+2)); float f[19]; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= gA [j]; f[1 ]= gA [buff_mem(1 ,x-1,y ,pitch)]; f[3 ]= gA [buff_mem(3 ,x+1,y ,pitch)]; f[2 ]= gA [buff_mem(2 ,x ,y-1,pitch)]; f[5 ]= gA [buff_mem(5 ,x-1,y-1,pitch)]; f[6 ]= gA [buff_mem(6 ,x+1,y-1,pitch)]; f[4 ]= gA [buff_mem(4 ,x ,y+1,pitch)]; f[7 ]= gA [buff_mem(7 ,x+1,y+1,pitch)]; f[8 ]= gA [buff_mem(8 ,x-1,y+1,pitch)]; f[9 ]= temp[buff_mem(9 ,x ,y ,pitch)]; f[10]= temp[buff_mem(10,x-1,y ,pitch)]; f[11]= temp[buff_mem(11,x ,y-1,pitch)]; f[12]= temp[buff_mem(12,x+1,y ,pitch)]; f[13]= temp[buff_mem(13,x ,y+1,pitch)]; f[14]= fA [f_mem (14,x ,y ,0,pitch, zInner)]; f[15]= fA [f_mem (15,x-1,y ,0,pitch, zInner)]; f[16]= fA [f_mem (16,x ,y-1,0,pitch, zInner)]; f[17]= fA [f_mem (17,x+1,y ,0,pitch, zInner)]; f[18]= fA [f_mem (18,x ,y+1,0,pitch, zInner)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } gB[buff_mem(0 ,x,y,pitch)] = f[0 ]; gB[buff_mem(1 ,x,y,pitch)] = f[3 ]; gB[buff_mem(2 ,x,y,pitch)] = f[4 ]; gB[buff_mem(3 ,x,y,pitch)] = f[1 ]; gB[buff_mem(4 ,x,y,pitch)] = f[2 ]; gB[buff_mem(5 ,x,y,pitch)] = f[7 ]; gB[buff_mem(6 ,x,y,pitch)] = f[8 ]; gB[buff_mem(7 ,x,y,pitch)] = f[5 ]; gB[buff_mem(8 ,x,y,pitch)] = f[6 ]; gB[buff_mem(9 ,x,y,pitch)] = f[14]; gB[buff_mem(10,x,y,pitch)] = f[17]; gB[buff_mem(11,x,y,pitch)] = f[18]; gB[buff_mem(12,x,y,pitch)] = f[15]; gB[buff_mem(13,x,y,pitch)] = f[16]; gB[buff_mem(14,x,y,pitch)] = f[9 ]; gB[buff_mem(15,x,y,pitch)] = f[12]; gB[buff_mem(16,x,y,pitch)] = f[13]; gB[buff_mem(17,x,y,pitch)] = f[10]; gB[buff_mem(18,x,y,pitch)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; if(im == 100)//north outlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x,y-1,pitch)]; North_Extrap(f,1.0f); } if(im == 200)//south inlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i 
,x,y+1,pitch)]; //South_Extrap(f,UMAX); float u_in = PoisProf3D(x,GPU*(zInner+2)); South_Extrap(f,u_in); } if(im == 300)//east outlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x-1,y,pitch)]; East_Extrap(f,1.0f); } if(im == 400)//west inlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x+1,y,pitch)]; float u_in = PoisProf3D(y,GPU*(zInner+2)); West_Extrap(f,u_in,t); } if(im == 25) xsymmetry_top(f,y,GPU*(zInner+2)); if(im == 26) xsymmetry_bot(f,y,GPU*(zInner+2)); mrt_collide(f,omega); for(int i = 0; i<19; i++) gB[buff_mem(i ,x,y,pitch)] = f[i ]; } if(REFINEMENT == 1){ if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR)) { // if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1)) // { // //do nothing // } // else{ //float rho,u,v,w,m9,m11,m13,m14,m15; float mom[9]; PhysicalMoments(mom,f); for(int i = 0; i<9; i++) g_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i]; // } } } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_inn(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F, float* f_interp, size_t pitch_interp) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,GPU*(zInner+2)+1+z); float f[19]; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[ 0] = fA[j]; f[ 1] = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)]; f[ 3] = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)]; f[ 2] = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)]; f[ 5] = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)]; f[ 6] = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)]; f[ 4] = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)]; f[ 7] = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)]; f[ 8] = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)]; if(z==zInner-1){//top nodes need info from h f[ 9] = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)]; f[10]= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)]; f[11]= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)]; f[12]= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)]; f[13]= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)]; f[14]= h [buff_mem(14,x ,y ,pitch)]; f[15]= h [buff_mem(15,x-1,y ,pitch)]; f[16]= h [buff_mem(16,x ,y-1,pitch)]; f[17]= h [buff_mem(17,x+1,y ,pitch)]; f[18]= h [buff_mem(18,x ,y+1,pitch)]; } else if(z==0){//bottom nodes need info from g f[ 9] =g [buff_mem(9 ,x ,y ,pitch)]; f[10]= g [buff_mem(10,x-1,y ,pitch)]; f[11]= g [buff_mem(11,x ,y-1,pitch)]; f[12]= g [buff_mem(12,x+1,y ,pitch)]; f[13]= g [buff_mem(13,x ,y+1,pitch)]; f[14]= fA[f_mem (14,x ,y ,z+1,pitch, zInner)]; f[15]= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)]; f[16]= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)]; f[17]= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)]; f[18]= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)]; } 
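// Interior z-slices stream entirely from fA; the first and last interior slices
// handled above pull their missing z-neighbors from the bottom (g) and top (h)
// edge-slice buffers of this GPU's sub-domain.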
else{//normal nodes f[ 9] = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)]; f[10]= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)]; f[11]= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)]; f[12]= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)]; f[13]= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)]; f[14]= fA[f_mem(14,x ,y ,z+1,pitch,zInner)]; f[15]= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)]; f[16]= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)]; f[17]= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)]; f[18]= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)]; }//end normal nodes if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } fB[f_mem(1 ,x,y,z,pitch,zInner)] = f[ 3] ; fB[f_mem(2 ,x,y,z,pitch,zInner)] = f[ 4] ; fB[f_mem(3 ,x,y,z,pitch,zInner)] = f[ 1] ; fB[f_mem(4 ,x,y,z,pitch,zInner)] = f[ 2] ; fB[f_mem(5 ,x,y,z,pitch,zInner)] = f[ 7] ; fB[f_mem(6 ,x,y,z,pitch,zInner)] = f[ 8] ; fB[f_mem(7 ,x,y,z,pitch,zInner)] = f[ 5] ; fB[f_mem(8 ,x,y,z,pitch,zInner)] = f[ 6] ; fB[f_mem(9 ,x,y,z,pitch,zInner)] = f[14]; fB[f_mem(10,x,y,z,pitch,zInner)] = f[17]; fB[f_mem(11,x,y,z,pitch,zInner)] = f[18]; fB[f_mem(12,x,y,z,pitch,zInner)] = f[15]; fB[f_mem(13,x,y,z,pitch,zInner)] = f[16]; fB[f_mem(14,x,y,z,pitch,zInner)] = f[ 9] ; fB[f_mem(15,x,y,z,pitch,zInner)] = f[12]; fB[f_mem(16,x,y,z,pitch,zInner)] = f[13]; fB[f_mem(17,x,y,z,pitch,zInner)] = f[10]; fB[f_mem(18,x,y,z,pitch,zInner)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; if(im == 100)//north outlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)]; North_Extrap(f,1.0f); } if(im == 200)//south inlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x,y+1,z,pitch,zInner)]; //South_Extrap(f,UMAX); float u_in = PoisProf3D(x,GPU*(zInner+2)+1+z); South_Extrap(f,u_in); } if(im == 300)//east outlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x-1,y,z,pitch,zInner)]; East_Extrap(f,1.0f); } if(im == 400)//west inlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x+1,y,z,pitch,zInner)]; float u_in = PoisProf3D(y,GPU*(zInner+2)+1+z); West_Extrap(f,u_in,t); } if(im == 25) xsymmetry_top(f,y,GPU*(zInner+2)+1+z); if(im == 26) xsymmetry_bot(f,y,GPU*(zInner+2)+1+z); mrt_collide(f,omega); if(VELAV == 1){ if(t>=START_VELAV && t<START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM]; vel_av(f,u_Av,v_Av,t); velAv_u[x+y*pitch+(z+1)*pitch*YDIM] = u_Av; velAv_v[x+y*pitch+(z+1)*pitch*YDIM] = v_Av; } else if(t>=START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM]; float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YDIM]; float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YDIM]; vel_fluc(f,u_Av,v_Av,u_fluc,v_fluc,t); velFluc_u[x+y*pitch+(z+1)*pitch*YDIM] = u_fluc; velFluc_v[x+y*pitch+(z+1)*pitch*YDIM] = v_fluc; } } for(int i = 0; i<19; i++) fB[f_mem(i ,x,y,z,pitch,zInner)] = f[ i] ; } if(REFINEMENT == 1){ if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR)) { // 
if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1)) // { // //do nothing // } // else{ //float rho,u,v,w,m9,m11,m13,m14,m15; float mom[9]; PhysicalMoments(mom,f); for(int i = 0; i<9; i++) f_interp[f_mem_interp(i,x-int(LRX0),y-int(LRY0),z,pitch_interp,zInner)]=mom[i]; // } } } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_top_LR(float* hB, float* hA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region int j = x+y*pitch;//index on padded mem (pitch in elements) float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+LRFACTOR*z; int im = ImageFcnLR(xcoord,ycoord,zcoord); float f[19]; __shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= hA [j]; f[1 ]= hA [buff_memLR(1 ,x-1,y ,pitch)]; f[3 ]= hA [buff_memLR(3 ,x+1,y ,pitch)]; f[2 ]= hA [buff_memLR(2 ,x ,y-1,pitch)]; f[5 ]= hA [buff_memLR(5 ,x-1,y-1,pitch)]; f[6 ]= hA [buff_memLR(6 ,x+1,y-1,pitch)]; f[4 ]= hA [buff_memLR(4 ,x ,y+1,pitch)]; f[7 ]= hA [buff_memLR(7 ,x+1,y+1,pitch)]; f[8 ]= hA [buff_memLR(8 ,x-1,y+1,pitch)]; f[9 ]= fA [ f_memLR(9 ,x ,y ,zInner-1,pitch, zInner)]; f[10]= fA [ f_memLR(10,x-1,y ,zInner-1,pitch, zInner)]; f[11]= fA [ f_memLR(11,x ,y-1,zInner-1,pitch, zInner)]; f[12]= fA [ f_memLR(12,x+1,y ,zInner-1,pitch, zInner)]; f[13]= fA [ f_memLR(13,x ,y+1,zInner-1,pitch, zInner)]; f[14]= temp[buff_memLR(14,x ,y ,pitch)]; f[15]= temp[buff_memLR(15,x-1,y ,pitch)]; f[16]= temp[buff_memLR(16,x ,y-1,pitch)]; f[17]= temp[buff_memLR(17,x+1,y ,pitch)]; f[18]= temp[buff_memLR(18,x ,y+1,pitch)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } hB[buff_memLR(0 ,x,y,pitch)] = f[0 ]; hB[buff_memLR(1 ,x,y,pitch)] = f[3 ]; hB[buff_memLR(2 ,x,y,pitch)] = f[4 ]; hB[buff_memLR(3 ,x,y,pitch)] = f[1 ]; hB[buff_memLR(4 ,x,y,pitch)] = f[2 ]; hB[buff_memLR(5 ,x,y,pitch)] = f[7 ]; hB[buff_memLR(6 ,x,y,pitch)] = f[8 ]; hB[buff_memLR(7 ,x,y,pitch)] = f[5 ]; hB[buff_memLR(8 ,x,y,pitch)] = f[6 ]; hB[buff_memLR(9 ,x,y,pitch)] = f[14]; hB[buff_memLR(10,x,y,pitch)] = f[17]; hB[buff_memLR(11,x,y,pitch)] = f[18]; hB[buff_memLR(12,x,y,pitch)] = f[15]; hB[buff_memLR(13,x,y,pitch)] = f[16]; hB[buff_memLR(14,x,y,pitch)] = f[9 ]; hB[buff_memLR(15,x,y,pitch)] = f[12]; 
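// Solid-node bounce-back: each population is written into the slot of its
// opposite lattice direction (1<->3, 2<->4, 5<->7, 6<->8, 9<->14, 10<->17, ...).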
hB[buff_memLR(16,x,y,pitch)] = f[13]; hB[buff_memLR(17,x,y,pitch)] = f[10]; hB[buff_memLR(18,x,y,pitch)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; mrt_collide(f,omega); for(int i = 0; i<19; i++) hB[buff_memLR(i ,x,y,pitch)] = f[i ]; } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_bot_LR(float* gB, float* gA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; //int z = (zInner+2)-1; int j = x+y*pitch;//index on padded mem (pitch in elements) float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; //float zcoord = LRZ0+GPU*LRFACTOR*z; float zcoord = LRZ0+LRFACTOR*(GPU*(zInner+2)-1); int im = ImageFcnLR(xcoord,ycoord,zcoord); float f[19]; __shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= gA [j]; f[1 ]= gA [buff_memLR(1 ,x-1,y ,pitch)]; f[3 ]= gA [buff_memLR(3 ,x+1,y ,pitch)]; f[2 ]= gA [buff_memLR(2 ,x ,y-1,pitch)]; f[5 ]= gA [buff_memLR(5 ,x-1,y-1,pitch)]; f[6 ]= gA [buff_memLR(6 ,x+1,y-1,pitch)]; f[4 ]= gA [buff_memLR(4 ,x ,y+1,pitch)]; f[7 ]= gA [buff_memLR(7 ,x+1,y+1,pitch)]; f[8 ]= gA [buff_memLR(8 ,x-1,y+1,pitch)]; f[9 ]= temp[buff_memLR(9 ,x ,y ,pitch)]; f[10]= temp[buff_memLR(10,x-1,y ,pitch)]; f[11]= temp[buff_memLR(11,x ,y-1,pitch)]; f[12]= temp[buff_memLR(12,x+1,y ,pitch)]; f[13]= temp[buff_memLR(13,x ,y+1,pitch)]; f[14]= fA [ f_memLR(14,x ,y ,0,pitch, zInner)]; f[15]= fA [ f_memLR(15,x-1,y ,0,pitch, zInner)]; f[16]= fA [ f_memLR(16,x ,y-1,0,pitch, zInner)]; f[17]= fA [ f_memLR(17,x+1,y ,0,pitch, zInner)]; f[18]= fA [ f_memLR(18,x ,y+1,0,pitch, zInner)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } gB[buff_memLR(0 ,x,y,pitch)] = f[0 ]; gB[buff_memLR(1 ,x,y,pitch)] = f[3 ]; gB[buff_memLR(2 ,x,y,pitch)] = f[4 ]; gB[buff_memLR(3 ,x,y,pitch)] = f[1 ]; gB[buff_memLR(4 ,x,y,pitch)] = f[2 ]; gB[buff_memLR(5 ,x,y,pitch)] = f[7 ]; gB[buff_memLR(6 ,x,y,pitch)] = f[8 ]; gB[buff_memLR(7 ,x,y,pitch)] = f[5 ]; gB[buff_memLR(8 ,x,y,pitch)] = f[6 ]; gB[buff_memLR(9 ,x,y,pitch)] = f[14]; gB[buff_memLR(10,x,y,pitch)] = f[17]; gB[buff_memLR(11,x,y,pitch)] = f[18]; gB[buff_memLR(12,x,y,pitch)] = f[15]; gB[buff_memLR(13,x,y,pitch)] = f[16]; gB[buff_memLR(14,x,y,pitch)] = f[9 ]; gB[buff_memLR(15,x,y,pitch)] = f[12]; gB[buff_memLR(16,x,y,pitch)] = f[13]; gB[buff_memLR(17,x,y,pitch)] = f[10]; 
gB[buff_memLR(18,x,y,pitch)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; mrt_collide(f,omega); for(int i = 0; i<19; i++) gB[buff_memLR(i ,x,y,pitch)] = f[i ]; } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_inn_LR(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z)); float f[19]; __shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[ 0] = fA[j]; f[ 1] = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)]; f[ 3] = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)]; f[ 2] = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)]; f[ 5] = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)]; f[ 6] = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)]; f[ 4] = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)]; f[ 7] = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)]; f[ 8] = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)]; if(z==zInner-1){//top nodes need info from h f[ 9] =fA[ f_memLR(9 ,x ,y ,z-1,pitch, zInner)]; f[10]= fA[ f_memLR(10,x-1,y ,z-1,pitch, zInner)]; f[11]= fA[ f_memLR(11,x ,y-1,z-1,pitch, zInner)]; f[12]= fA[ f_memLR(12,x+1,y ,z-1,pitch, zInner)]; f[13]= fA[ f_memLR(13,x ,y+1,z-1,pitch, zInner)]; f[14]= h [buff_memLR(14,x ,y ,pitch)]; f[15]= h [buff_memLR(15,x-1,y ,pitch)]; f[16]= h [buff_memLR(16,x ,y-1,pitch)]; f[17]= h [buff_memLR(17,x+1,y ,pitch)]; f[18]= h [buff_memLR(18,x ,y+1,pitch)]; } else if(z==0){//bottom nodes need info from g f[ 9] =g [buff_memLR(9 ,x ,y ,pitch)]; f[10]= g [buff_memLR(10,x-1,y ,pitch)]; f[11]= g [buff_memLR(11,x ,y-1,pitch)]; f[12]= g [buff_memLR(12,x+1,y ,pitch)]; f[13]= g [buff_memLR(13,x ,y+1,pitch)]; f[14]= fA[ f_memLR(14,x ,y ,z+1,pitch, zInner)]; f[15]= fA[ f_memLR(15,x-1,y ,z+1,pitch, zInner)]; f[16]= fA[ f_memLR(16,x ,y-1,z+1,pitch, zInner)]; f[17]= fA[ f_memLR(17,x+1,y ,z+1,pitch, zInner)]; f[18]= fA[ f_memLR(18,x ,y+1,z+1,pitch, zInner)]; } else{//normal nodes f[ 9] =fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)]; f[10]= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)]; f[11]= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)]; f[12]= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)]; f[13]= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)]; f[14]= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)]; f[15]= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)]; f[16]= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)]; f[17]= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)]; f[18]= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)]; }//end normal nodes if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; 
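// Momentum-exchange force: at flagged solid nodes (im == 10, flag_F == 1) the
// x/y/z contributions accumulate 2*f_i along each lattice direction; the
// per-block shared-memory reduction below adds them into FX/FY/FZ via atomicAdd.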
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f[ 3] ; fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f[ 4] ; fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f[ 1] ; fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f[ 2] ; fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f[ 7] ; fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f[ 8] ; fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f[ 5] ; fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f[ 6] ; fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f[14]; fB[f_memLR(10,x,y,z,pitch,zInner)] = f[17]; fB[f_memLR(11,x,y,z,pitch,zInner)] = f[18]; fB[f_memLR(12,x,y,z,pitch,zInner)] = f[15]; fB[f_memLR(13,x,y,z,pitch,zInner)] = f[16]; fB[f_memLR(14,x,y,z,pitch,zInner)] = f[ 9] ; fB[f_memLR(15,x,y,z,pitch,zInner)] = f[12]; fB[f_memLR(16,x,y,z,pitch,zInner)] = f[13]; fB[f_memLR(17,x,y,z,pitch,zInner)] = f[10]; fB[f_memLR(18,x,y,z,pitch,zInner)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; mrt_collide(f,omega); if(VELAV == 1){ if(t>=START_VELAV && t<START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM]; vel_avLR(f,u_Av,v_Av,t); velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av; velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av; } else if(t>=START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM]; float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM]; float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM]; vel_flucLR(f,u_Av,v_Av,u_fluc,v_fluc,t); velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc; velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc; } } for(int i = 0; i<19; i++) fB[f_memLR(i ,x,y,z,pitch,zInner)] = f[ i] ; } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } /* InterpCF is used on the LR grid. It first uses part of its threads to read from the coarse mesh nodes that completely envelope the fine mesh nodes, and loads the f's into shared memory. 
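   (More precisely, the shared arrays hold the nine physical moments of each
   enveloping coarse node, plus its strain-rate tensor when ORDER == 2 so that a
   quadratic correction can be added to the interpolated momentum moments. The
   interpolated moments are then converted back to distributions with the scale
   factor SF, which rescales the non-equilibrium part for the finer grid.)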
Next, all threads use the shared memory data to interpolate and scale the f's */ __global__ void InterpCF(float* f_f, float* g_f, float* h_f, size_t pitch_f, float* m_f_c, float* m_g_c, float* m_h_c, float* m_g_temp, size_t pitch_m, float SF, float omega_c, int GPU, int zInner, int zInner_f) { int x = threadIdx.x+blockIdx.x*blockDim.x; int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; __shared__ float mom_c[BLOCKSIZEINTERP][2][2][9]; __shared__ float S_c[BLOCKSIZEINTERP][2][2][6]; //int GPU = 0; int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner_f+2)+z)); if(blockIdx.z == 0 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2) { //use g and g_temp int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int ymax = YLRDIM*LRFACTOR+1; if(threadIdx.z == 0){ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_temp[x_c+y_c*pitch_m+i*ymax*pitch_m]; } else{ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m]; } // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c); } else if(blockIdx.z == 1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2) { //use g and f int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int ymax = YLRDIM*LRFACTOR+1; if(threadIdx.z == 0){ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m]; } else{ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+i*ymax*pitch_m*zInner]; } // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c); } else if(blockIdx.z == zInner+1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2) { //use h and f int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int ymax = YLRDIM*LRFACTOR+1; if(threadIdx.z == 0){ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+(zInner-1)*ymax*pitch_m+i*ymax*pitch_m*zInner]; } else{ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_h_c[x_c+y_c*pitch_m+i*ymax*pitch_m]; } // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c); } else if(threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2){//use f only int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in 
coarse grid, blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int z_c = threadIdx.z+blockIdx.z-2;//in coarse grid, blockdim.z is 1; -2 to account for g and lower halo int ymax = YLRDIM*LRFACTOR+1; for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+z_c*ymax*pitch_m+i*ymax*pitch_m*zInner]; // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c); } syncthreads(); if(x<LRLEVEL || x>XLRDIM-LRLEVEL-1 || y<LRLEVEL || y>YLRDIM-LRLEVEL-1){ //if(x<LRLEVEL || x>XLRDIM-LRLEVEL-2 || y<LRLEVEL || y>YLRDIM-LRLEVEL-2){ //interpolate from shared mem int xm = int(threadIdx.x*LRFACTOR+LRFACTOR*0.5f); int ym = int(threadIdx.y*LRFACTOR+LRFACTOR*0.5f); int zm = int(threadIdx.z*LRFACTOR+LRFACTOR*0.5f); int xp = xm+1; //int yp = ym+1; int zp = zm+1; float xf = (threadIdx.x*LRFACTOR+LRFACTOR*0.5f)-xm; float yf = (threadIdx.y*LRFACTOR+LRFACTOR*0.5f)-ym; float zf = (threadIdx.z*LRFACTOR+LRFACTOR*0.5f)-zm; float mom[9]; for(int i = 0; i<9; i++){ float v000 = mom_c[xm][0][0][i]; float v001 = mom_c[xp][0][0][i]; float v010 = mom_c[xm][1][0][i]; float v011 = mom_c[xp][1][0][i]; float v100 = mom_c[xm][0][1][i]; float v101 = mom_c[xp][0][1][i]; float v110 = mom_c[xm][1][1][i]; float v111 = mom_c[xp][1][1][i]; mom[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf); } if(ORDER == 2) { float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8; float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8; float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8; float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8; float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8; float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8; u_x1=S_c[xm][0][0][0];v_y1=S_c[xm][0][0][1];w_z1=S_c[xm][0][0][2];Sxy1=S_c[xm][0][0][3];Syz1=S_c[xm][0][0][4];Sxz1=S_c[xm][0][0][5]; u_x2=S_c[xp][0][0][0];v_y2=S_c[xp][0][0][1];w_z2=S_c[xp][0][0][2];Sxy2=S_c[xp][0][0][3];Syz2=S_c[xp][0][0][4];Sxz2=S_c[xp][0][0][5]; u_x3=S_c[xm][1][0][0];v_y3=S_c[xm][1][0][1];w_z3=S_c[xm][1][0][2];Sxy3=S_c[xm][1][0][3];Syz3=S_c[xm][1][0][4];Sxz3=S_c[xm][1][0][5]; u_x4=S_c[xp][1][0][0];v_y4=S_c[xp][1][0][1];w_z4=S_c[xp][1][0][2];Sxy4=S_c[xp][1][0][3];Syz4=S_c[xp][1][0][4];Sxz4=S_c[xp][1][0][5]; u_x5=S_c[xm][0][1][0];v_y5=S_c[xm][0][1][1];w_z5=S_c[xm][0][1][2];Sxy5=S_c[xm][0][1][3];Syz5=S_c[xm][0][1][4];Sxz5=S_c[xm][0][1][5]; u_x6=S_c[xp][0][1][0];v_y6=S_c[xp][0][1][1];w_z6=S_c[xp][0][1][2];Sxy6=S_c[xp][0][1][3];Syz6=S_c[xp][0][1][4];Sxz6=S_c[xp][0][1][5]; u_x7=S_c[xm][1][1][0];v_y7=S_c[xm][1][1][1];w_z7=S_c[xm][1][1][2];Sxy7=S_c[xm][1][1][3];Syz7=S_c[xm][1][1][4];Sxz7=S_c[xm][1][1][5]; u_x8=S_c[xp][1][1][0];v_y8=S_c[xp][1][1][1];w_z8=S_c[xp][1][1][2];Sxy8=S_c[xp][1][1][3];Syz8=S_c[xp][1][1][4];Sxz8=S_c[xp][1][1][5]; float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77; m03=mom_c[xm][0][0][1];m05=mom_c[xm][0][0][2];m07=mom_c[xm][0][0][3]; m13=mom_c[xp][0][0][1];m15=mom_c[xp][0][0][2];m17=mom_c[xp][0][0][3]; m23=mom_c[xm][1][0][1];m25=mom_c[xm][1][0][2];m27=mom_c[xm][1][0][3]; m33=mom_c[xp][1][0][1];m35=mom_c[xp][1][0][2];m37=mom_c[xp][1][0][3]; m43=mom_c[xm][0][1][1];m45=mom_c[xm][0][1][2];m47=mom_c[xm][0][1][3]; m53=mom_c[xp][0][1][1];m55=mom_c[xp][0][1][2];m57=mom_c[xp][0][1][3]; 
m63=mom_c[xm][1][1][1];m65=mom_c[xm][1][1][2];m67=mom_c[xm][1][1][3]; m73=mom_c[xp][1][1][1];m75=mom_c[xp][1][1][2];m77=mom_c[xp][1][1][3]; float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f; float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f; float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f; float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f; float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f; float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f; float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f; float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f; float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f; float xpr = 4.f*xf*xf-4.f*xf+1.f; float ypr = 4.f*yf*yf-4.f*yf+1.f; float zpr = 4.f*zf*zf-4.f*zf+1.f; mom[1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr); mom[2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr); mom[3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr); } float f[19]; InvertPhysicalMoments(f,mom,SF); if(im != 1 && im != 10){ if(z==0){ for(int i = 0; i<19; i++){ g_f[buff_memLR(i,x,y,pitch_f)]=f[i]; } } else if(z==gridDim.z*blockDim.z-1){ for(int i = 0; i<19; i++){ h_f[buff_memLR(i,x,y,pitch_f)]=f[i]; } } else{ for(int i = 0; i<19; i++){ f_f[f_memLR(i,x,y,z-1,pitch_f,zInner_f)]=f[i]; } } } } } __global__ void InterpFC(float* f_c, float* g_c, float* h_c, float* f_f, float* h_f, float* temp_f, size_t pitch_c, size_t pitch_f, float SF, float omega_f, int GPU, int zInner, int zInner_f) { int x = threadIdx.x+blockIdx.x*blockDim.x; int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; //if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) && //(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2))) //(true)) if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-2 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-2) && (x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2))) //(true)) { float f[19]; float mom[8][9];//physical moments of 8 neighboring nodes float S_f[8][6];//strain rate tensor of 8 neighboring nodes int xm = LRLEVEL*(x-LRX0); int ym = LRLEVEL*(y-LRY0); int zm = LRLEVEL*(z-(-(1.f-0.5f*LRFACTOR)))-1;//LRZ0=-(1.f-0.5f*LRFACTOR), and -1 to account for g_LR int xp = xm+1; int yp = ym+1; int zp = zm+1; //top nodes. interp between h and h_temp. 
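// Fine-to-coarse restriction: gather the eight fine-grid nodes that surround
// this coarse node, compute their physical moments and strain rates, average
// them (ORDER == 1) or add a gradient-based correction (ORDER == 2), rescale by
// SF, and write the result back to the coarse g/h/f arrays (solid nodes skipped).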
output to h if(z == zInner+1) { for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xm,ym,pitch_f)]; PhysicalMoments(mom[0],f); StrainRate(S_f[0],mom[0],omega_f); for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xp,ym,pitch_f)]; PhysicalMoments(mom[1],f); StrainRate(S_f[1],mom[1],omega_f); for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xm,yp,pitch_f)]; PhysicalMoments(mom[2],f); StrainRate(S_f[2],mom[2],omega_f); for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xp,yp,pitch_f)]; PhysicalMoments(mom[3],f); StrainRate(S_f[3],mom[3],omega_f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xm,ym,pitch_f)]; PhysicalMoments(mom[4],f); StrainRate(S_f[4],mom[4],omega_f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xp,ym,pitch_f)]; PhysicalMoments(mom[5],f); StrainRate(S_f[5],mom[5],omega_f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xm,yp,pitch_f)]; PhysicalMoments(mom[6],f); StrainRate(S_f[6],mom[6],omega_f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xp,yp,pitch_f)]; PhysicalMoments(mom[7],f); StrainRate(S_f[7],mom[7],omega_f); } //inner nodes. output to g or f else{ for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,ym,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[0],f); StrainRate(S_f[0],mom[0],omega_f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,ym,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[1],f); StrainRate(S_f[1],mom[1],omega_f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,yp,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[2],f); StrainRate(S_f[2],mom[2],omega_f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,yp,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[3],f); StrainRate(S_f[3],mom[3],omega_f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,ym,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[4],f); StrainRate(S_f[4],mom[4],omega_f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,ym,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[5],f); StrainRate(S_f[5],mom[5],omega_f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,yp,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[6],f); StrainRate(S_f[6],mom[6],omega_f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,yp,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[7],f); StrainRate(S_f[7],mom[7],omega_f); } if(ORDER == 1){ for(int i = 0; i<9; i++) mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]); } else if(ORDER == 2) { float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8; float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8; float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8; float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8; float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8; float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8; u_x1=S_f[0][0];v_y1=S_f[0][1];w_z1=S_f[0][2];Sxy1=S_f[0][3];Syz1=S_f[0][4];Sxz1=S_f[0][5]; u_x2=S_f[1][0];v_y2=S_f[1][1];w_z2=S_f[1][2];Sxy2=S_f[1][3];Syz2=S_f[1][4];Sxz2=S_f[1][5]; u_x3=S_f[2][0];v_y3=S_f[2][1];w_z3=S_f[2][2];Sxy3=S_f[2][3];Syz3=S_f[2][4];Sxz3=S_f[2][5]; u_x4=S_f[3][0];v_y4=S_f[3][1];w_z4=S_f[3][2];Sxy4=S_f[3][3];Syz4=S_f[3][4];Sxz4=S_f[3][5]; u_x5=S_f[4][0];v_y5=S_f[4][1];w_z5=S_f[4][2];Sxy5=S_f[4][3];Syz5=S_f[4][4];Sxz5=S_f[4][5]; u_x6=S_f[5][0];v_y6=S_f[5][1];w_z6=S_f[5][2];Sxy6=S_f[5][3];Syz6=S_f[5][4];Sxz6=S_f[5][5]; u_x7=S_f[6][0];v_y7=S_f[6][1];w_z7=S_f[6][2];Sxy7=S_f[6][3];Syz7=S_f[6][4];Sxz7=S_f[6][5]; u_x8=S_f[7][0];v_y8=S_f[7][1];w_z8=S_f[7][2];Sxy8=S_f[7][3];Syz8=S_f[7][4];Sxz8=S_f[7][5]; float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77; m03=mom[0][1];m05=mom[0][2];m07=mom[0][3]; 
m13=mom[1][1];m15=mom[1][2];m17=mom[1][3]; m23=mom[2][1];m25=mom[2][2];m27=mom[2][3]; m33=mom[3][1];m35=mom[3][2];m37=mom[3][3]; m43=mom[4][1];m45=mom[4][2];m47=mom[4][3]; m53=mom[5][1];m55=mom[5][2];m57=mom[5][3]; m63=mom[6][1];m65=mom[6][2];m67=mom[6][3]; m73=mom[7][1];m75=mom[7][2];m77=mom[7][3]; float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f; float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f; float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f; float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f; float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f; float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f; float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f; float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f; float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f; for(int i = 0; i<9; i++) mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]); float xpr = 0.f;//4.f*xf*xf-4.f*xf+1.f; float ypr = 0.f;//4.f*yf*yf-4.f*yf+1.f; float zpr = 0.f;//4.f*zf*zf-4.f*zf+1.f; mom[0][1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr); mom[0][2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr); mom[0][3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr); } InvertPhysicalMoments(f,mom[0],SF); //for(int i = 0; i<19; i++) f[i] = 0.1f; //int GPU = 0; int im = ImageFcn(x,y,GPU*(zInner+2)+z); if(im != 1 && im != 10){ if(z == 0){ for(int i = 0; i<19; i++) g_c[buff_mem(i,x,y,pitch_c)]=f[i]; } else if(z == zInner+1){ for(int i = 0; i<19; i++) h_c[buff_mem(i,x,y,pitch_c)]=f[i]; } else{ for(int i = 0; i<19; i++) f_c[f_mem(i,x,y,z-1,pitch_c,zInner)]=f[i]; } } }//end extraction region } void WriteResults(ostream &output, float *fin, float *gin, float *hin, float **velAv, float **velFluc, float omega, int GPU_N, int GPU) { float f[19]; output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\"\n"; output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM/GPU_N<<"\n"; for(int j = 0; j<YDIM; j++){ for(int i = 0; i<XDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = gin[(i+j*XDIM)+l *XDIM*YDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*GPU)<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XDIM]<<","<<velAv[1][i+j*XDIM]<<", "<<velFluc[0][i+j*XDIM]<<","<<velFluc[1][i+j*XDIM]<<endl; }} for(int k = 1; k<ZDIM/GPU_N-1; k++){ for(int j = 0; j<YDIM; j++){ for(int i = 0; i<XDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = fin[(i+j*XDIM)+(k-1)*XDIM*YDIM+l*XDIM*YDIM*(ZDIM/GPU_N-2)]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float m1 =-30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+8.f*f[5]+8.f*f[6]+8.f*f[7]+8.f*f[8]+-11.f*f[9]+8.f*f[10]+8.f*f[11]+8.f*f[12]+8.f*f[13]+-11.f*f[14]+8.f*f[15]+8.f*f[16]+8.f*f[17]+8.f*f[18]; //float m6 = -4.f*f[2]+4.f*f[4]+f[5]+f[6]+-f[7]+-f[8]+f[11]+-f[13]+f[16]+-f[18]; float m10 
=-4.f*f[1]+2.f*f[2]+-4.f*f[3]+2.f*f[4]+f[5]+f[6]+f[7]+f[8]+2.f*f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+2.f*f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18]; float m16 = f[5]+-f[6]+-f[7]+f[8]-f[10]+f[12]+-f[15]+f[17]; float m[19] = {0}; Moments_host(f,m); float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f); //float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); float PI11 = -0.026315789f*m[ 1]-0.5f *omega*m[ 9]; float PI22 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]-3.0f*m[11]); float PI33 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]+3.0f*m[11]); float PI12 = -1.5f*omega*m[13]; float PI23 = -1.5f*omega*m[14]; float PI13 = -1.5f*omega*m[15]; //we know Smag on coarse mesh float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); //InvertMoments_host(f,m); //u = m[3]; //v = m[5]; //w = m[7]; //m6 = m[6 ]; //m10= m[10]; //m16= m[16]; int z = (ZDIM/GPU_N*GPU+k); output<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", " <<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl; //<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<endl; }}} for(int j = 0; j<YDIM; j++){ for(int i = 0; i<XDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = hin[(i+j*XDIM)+l *XDIM*YDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*(GPU+1)-1)<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XDIM+(ZDIM-1)*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<", " <<velFluc[0][i+j*XDIM+(ZDIM-1)*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<endl; }} } void WriteResultsLR(ofstream &output, float *fin, float *gin, float *hin, float **velAv, float **velFluc, float omega, int GPU_N, int GPU) { float f[19]; output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\"\n"; output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM/GPU_N<<"\n"; for(int j = 0; j<YLRDIM; j++){ for(int i = 0; i<XLRDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = gin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float x = LRX0+LRFACTOR*i; float y = LRY0+LRFACTOR*j; float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU); output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XLRDIM]<<","<<velAv[1][i+j*XLRDIM]<<", "<<velFluc[0][i+j*XLRDIM]<<","<<velFluc[1][i+j*XLRDIM]<<endl; }} for(int k = 1; k<ZLRDIM/GPU_N-1; k++){ for(int j = 0; j<YLRDIM; j++){ for(int i = 0; i<XLRDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = fin[(i+j*XLRDIM)+(k-1)*XLRDIM*YLRDIM+l*XLRDIM*YLRDIM*(ZLRDIM/GPU_N-2)]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float x = LRX0+LRFACTOR*i; float y = LRY0+LRFACTOR*j; float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU+k); float m[19] = {0}; Moments_host(f,m); float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f); //float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); float PI11 = -0.026315789f*m[ 
1]-0.5f *omega*m[ 9]; float PI22 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]-3.0f*m[11]); float PI33 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]+3.0f*m[11]); float PI12 = -1.5f*omega*m[13]; float PI23 = -1.5f*omega*m[14]; float PI13 = -1.5f*omega*m[15]; //we know Smag on coarse mesh float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", " <<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl; //<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<endl; }}} for(int j = 0; j<YLRDIM; j++){ for(int i = 0; i<XLRDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = hin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float x = LRX0+LRFACTOR*i; float y = LRY0+LRFACTOR*j; float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*(GPU+1)-1); output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velAv[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<", " <<velFluc[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<endl; }} } void WriteForces(float **F, ofstream &output, int ForceTime, int level) { float ref = UMAX*UMAX*ZDIM*OBSTR1; if(level > 0) ref *= LRLEVEL*LRLEVEL; for(int i = 0; i<ForceTime; i++){ output<<i+STARTF<<", "<<F[0][i]/ref<<", "<<F[1][i]/ref<<", "<<F[2][i]/ref<<endl; } } void WriteInputs(ostream &output, float omega, float omegaLR, int GPU_per_node) { output<<"Base domain size \t"<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl; output<<"Base blocksize: \t"<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl; output<<"Obst1 location: \t("<<OBSTX1<<","<<OBSTY1<<","<<OBSTZ1<<")"<<endl; output<<"Obst1 radius: \t"<<OBSTR1<<endl; output<<"Obst2 location: \t("<<OBSTX2<<","<<OBSTY2<<","<<OBSTZ2<<")"<<endl; output<<"Obst2 radius: \t"<<OBSTR2<<endl; output<<"RE: \t"<<RE<<endl; output<<"UMAX: \t"<<UMAX<<endl; output<<"omega \t: "<<omega<<endl; output<<"TMAX: \t"<<TMAX<<endl; output<<"STARTF: \t"<<STARTF<<endl; output<<"START_VELAV: \t"<<START_VELAV<<endl; output<<"START_VELFLUC: \t"<<START_VELFLUC<<endl; output<<"REFINEMENT: \t"<<REFINEMENT<<endl; output<<"MODEL: \t"<<MODEL<<endl; output<<"Smagorinsky LES: \t"<<SmagLES<<endl; output<<"CS: \t"<<CS<<endl; output<<"LR domain size \t"<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl; output<<"LR factor \t"<<LRFACTOR<<endl; output<<"LR location \t"<<LRX0<<"x"<<LRY0<<"x"<<LRZ0<<endl; output<<"LR blocksize: \t"<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl; output<<"omega in LR \t: "<<omegaLR<<endl; output<<"GPUs per node \t: "<<GPU_per_node<<endl; } int main(int argc, char *argv[]) { int GPU_N; cudaGetDeviceCount(&GPU_N); GPU_N=NUMGPU; cout<<"number of GPUs: "<<GPU_N<<endl; ofstream output; ofstream outputForce; ofstream outputInputs; string FileName = CASENAME; output.open ((FileName+".dat").c_str()); outputForce.open ((FileName+".force").c_str()); outputInputs.open ((FileName+".inputs").c_str()); //size_t memsize, memsize2; size_t pitch = 2; while(pitch<XDIM) pitch=pitch*2; pitch *= sizeof(float);//pitch*sizeof(float); size_t pitch_e = pitch/sizeof(float); cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl; 
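// Relaxation rates: nu = UMAX*CharLength/RE in lattice units, tau = 3*nu + 0.5,
// omega = 1/tau. Illustrative numbers only (not the header values): UMAX = 0.1,
// CharLength = 20, RE = 100 gives nu = 0.02, tau = 0.56, omega ~ 1.786.
// omegaLR applies the standard refinement viscosity matching tau_f = 2*tau_c - 0.5
// once per halving of the grid spacing; SF_cf (and its inverse SF_fc) appear to be
// the rescaling factors for the non-equilibrium moments transferred between grids.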
float CharLength = OBSTR1*2.f; float omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f); float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); if(LRFACTOR == 0.25f){ omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f)); } if(LRFACTOR == 0.125f){ omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f)); omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f)); } float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR); float SF_fc = 1.f/SF_cf; cout<<SF_cf<<endl; WriteInputs(outputInputs,omega,omegaLR,GPU_N); WriteInputs(cout,omega,omegaLR,GPU_N); if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f && REFINEMENT == 1){ cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl; return 0; } int zInner = ZDIM/GPU_N-2; //excluding halo int ForceTime = max(0,TMAX-STARTF); dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ); //2 halo layers per GPU (for 2 GPUs) dim3 grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ); dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1); cudaStream_t stream_halo[GPU_N]; cudaStream_t stream_inner[GPU_N]; //data pointers as 3D array (GPUxCoord) float *f_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N]; float *f_d[GPU_N][2], *g_d[GPU_N][2], *h_d[GPU_N][2]; float *g_temp[GPU_N], *h_temp[GPU_N]; float *F_h[GPU_N][3]; float *F_d[GPU_N][3]; float *F_total[3]; float *velAv_h[GPU_N][3],*velFluc_h[GPU_N][3]; float *velAv_d[GPU_N][3],*velFluc_d[GPU_N][3]; for(int i = 0; i<3; i++) F_total[i] = (float *)malloc(ForceTime*sizeof(float)); for(int i=0;i<3;i++) for(int j=0;j<(ForceTime);j++) F_total[i][j] = 0; //Malloc and Initialize for each GPU for(int n = 0; n<GPU_N; n++){ f_h [n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float)); g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float)); h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float)); for(int i = 0; i<3; i++){ F_h [n][i] = (float *)malloc(ForceTime*sizeof(float)); velAv_h [n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float)); velFluc_h[n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float)); } cudaSetDevice(n); cudaStreamCreate(&stream_halo[n]); cudaStreamCreate(&stream_inner[n]); for(int m = 0; m<GPU_N; m++) if(m != n) cudaDeviceEnablePeerAccess(m,0); for(int i = 0; i<2; i++){ cudaMalloc((void **) &f_d[n][i], pitch_e*YDIM*zInner*19*sizeof(float)); cudaMalloc((void **) &g_d[n][i], pitch_e*YDIM* 19*sizeof(float)); cudaMalloc((void **) &h_d[n][i], pitch_e*YDIM* 19*sizeof(float)); } cudaMalloc((void **) & g_temp[n], pitch_e*YDIM* 19*sizeof(float)); cudaMalloc((void **) & h_temp[n], pitch_e*YDIM* 19*sizeof(float)); for(int i = 0; i<3; i++){ cudaMalloc((void **) & F_d [n][i], (ForceTime)*sizeof(float)); cudaMalloc((void **) & velAv_d [n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float)); cudaMalloc((void **) & velFluc_d[n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float)); } //initialize host f_inner for (int i = 0; i < XDIM*YDIM*zInner*19; i++) f_h[n][i] = 0; //initialize host g,h for (int i = 0; i < XDIM*YDIM*19; i++){ g_h[n][i] = 0; h_h[n][i] = 0; } for(int i=0;i<3;i++){ for(int j=0;j<(ForceTime);j++) F_h[n][i][j] = 0; for (int j = 0; j < XDIM*YDIM*ZDIM/GPU_N; j++){ velAv_h [n][i][j] = 0; velFluc_h[n][i][j] = 0; } } for(int i = 0; i<2; i++){ cudaMemcpy2D(f_d[n][i],pitch,f_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice); cudaMemcpy2D(g_d[n][i],pitch,g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,cudaMemcpyHostToDevice); cudaMemcpy2D(h_d[n][i],pitch,h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,cudaMemcpyHostToDevice); } for(int i = 0; i<3; i++){ 
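// Upload the zero-initialized velocity-average/fluctuation arrays and the force
// time series for this GPU; the 2D copies honor the padded device pitch.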
cudaMemcpy2D(velAv_d [n][i],pitch,velAv_h [n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice); cudaMemcpy2D(velFluc_d[n][i],pitch,velFluc_h[n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice); cudaMemcpy(F_d[n][i],F_h[n][i],sizeof(float)*(ForceTime),cudaMemcpyHostToDevice); } //initialization kernels for(int i = 0; i<2; i++){ initialize<<< grid,threads>>>(f_d[n][i],pitch_e,zInner,GPU_N); initialize<<<g_grid,threads>>>(g_d[n][i],pitch_e, 1,GPU_N); initialize<<<g_grid,threads>>>(h_d[n][i],pitch_e, 1,GPU_N); } initialize<<<g_grid,threads>>>(g_temp[n],pitch_e, 1,GPU_N); initialize<<<g_grid,threads>>>(h_temp[n],pitch_e, 1,GPU_N); }//end Malloc and Initialize //data pointers as 3D array (GPUxCoord) float *f_LR_h[GPU_N], *g_LR_h[GPU_N], *h_LR_h[GPU_N]; float *f_LR_d[GPU_N][2], *g_LR_d[GPU_N][2], *h_LR_d[GPU_N][2]; float *g_LR_temp[GPU_N], *h_LR_temp[GPU_N]; float *velAv_LR_h[GPU_N][3],*velFluc_LR_h[GPU_N][3]; float *velAv_LR_d[GPU_N][3],*velFluc_LR_d[GPU_N][3]; float *f_interp[GPU_N], *g_interp[GPU_N], *h_interp[GPU_N], *g_interp_temp[GPU_N], *h_interp_temp[GPU_N]; float *interp_h[GPU_N]; size_t pitchLR = 2; while(pitchLR<XLRDIM) pitchLR=pitchLR*2; pitchLR = pitchLR*sizeof(float); size_t pitchLR_e = pitchLR/sizeof(float); cout<<"LR Pitch (in elements): "<<pitchLR_e<<endl; size_t pitchInterp = 2; while(pitchInterp<XLRDIM*LRFACTOR+1) pitchInterp=pitchInterp*2; pitchInterp = pitchInterp*sizeof(float); size_t pitchInterp_e = pitchInterp/sizeof(float); cout<<"Interp Pitch (in elements): "<<pitchInterp_e<<endl; int zLRInner = ZLRDIM/GPU_N-2; dim3 LR_threads(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ); dim3 LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),(zLRInner)/BLOCKSIZELRZ); dim3 g_LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),1); dim3 Interp_threads(BLOCKSIZEINTERP, LRLEVEL, LRLEVEL); dim3 Interp_grid(((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP),((YLRDIM+LRLEVEL-1)/LRLEVEL),ZLRDIM/LRLEVEL/GPU_N); cout<<((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP)<<", "<<((YLRDIM+LRLEVEL-1)/LRLEVEL)<<", "<<ZLRDIM/LRLEVEL/GPU_N<<endl; dim3 Interp_grid_c(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(ZDIM/GPU_N)/BLOCKSIZEZ); //setup LR if(REFINEMENT == 1){ for(int n = 0; n<GPU_N; n++){ f_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM*zLRInner*19*sizeof(float)); g_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float)); h_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float)); interp_h [n] = (float *)malloc((XLRDIM*LRFACTOR+1)*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float)); for(int i = 0; i<3; i++){ velAv_LR_h [n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); velFluc_LR_h[n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); } cudaSetDevice(n); for(int i = 0; i<2; i++){ cudaMalloc((void **) &f_LR_d[n][i], pitchLR_e*YLRDIM*zLRInner*19*sizeof(float)); cudaMalloc((void **) &g_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float)); cudaMalloc((void **) &h_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float)); } cudaMalloc((void **) & g_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float)); cudaMalloc((void **) & h_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float)); cudaMalloc((void **) & f_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float)); cudaMalloc((void **) & g_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); cudaMalloc((void **) & h_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); cudaMalloc((void **) & 
g_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); cudaMalloc((void **) & h_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); for(int i = 0; i<3; i++){ cudaMalloc((void **) & velAv_LR_d [n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); cudaMalloc((void **) & velFluc_LR_d[n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); } for (int i = 0; i < XLRDIM*YLRDIM*zLRInner*19; i++) f_LR_h[n][i] = 0; //initialize host g,h for (int i = 0; i < XLRDIM*YLRDIM*19; i++){ g_LR_h[n][i] = 0; h_LR_h[n][i] = 0; } for(int i=0;i<3;i++){ for (int j = 0; j < XLRDIM*YLRDIM*ZLRDIM/GPU_N; j++){ velAv_LR_h [n][i][j] = 0; velFluc_LR_h[n][i][j] = 0; } } for(int i = 0; i<2; i++){ cudaMemcpy2D(f_LR_d[n][i],pitchLR,f_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyHostToDevice); cudaMemcpy2D(g_LR_d[n][i],pitchLR,g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyHostToDevice); cudaMemcpy2D(h_LR_d[n][i],pitchLR,h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyHostToDevice); } for(int i = 0; i<3; i++){ cudaMemcpy2D(velAv_LR_d [n][i],pitchLR,velAv_LR_h [n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice); cudaMemcpy2D(velFluc_LR_d[n][i],pitchLR,velFluc_LR_h[n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice); } //initialization kernels for(int i = 0; i<2; i++){ initializeLR<<< LR_grid,LR_threads>>>(f_LR_d[n][i],pitchLR_e,zLRInner,GPU_N); initializeLR<<<g_LR_grid,LR_threads>>>(g_LR_d[n][i],pitchLR_e, 1,GPU_N); initializeLR<<<g_LR_grid,LR_threads>>>(h_LR_d[n][i],pitchLR_e, 1,GPU_N); } initializeLR<<<g_LR_grid,LR_threads>>>(g_LR_temp[n],pitchLR_e, 1,GPU_N); initializeLR<<<g_LR_grid,LR_threads>>>(h_LR_temp[n],pitchLR_e, 1,GPU_N); }//end of GPU loop for malloc and initialize for LR }//end of LR malloc and initialize cudaFuncSetCacheConfig(InterpCF,cudaFuncCachePreferShared); int A = 0; int B = 1; int C = 0; int D = 1; for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); size_t mem_avail, mem_total; cudaMemGetInfo(&mem_avail,&mem_total); cout<<"Device memory used for dev"<<n<<" : "<<(mem_total-mem_avail)*pow(10,-9)<<" GB\n"; cout<<"Device memory available for dev"<<n<<" : "<<(mem_avail)*pow(10,-9)<<" GB\n"; } struct timeval tdr0,tdr1; double restime; cudaDeviceSynchronize(); gettimeofday (&tdr0,NULL); //time loop for(int t = 0; t<TMAX; t++) { //copy temporary array for top and bottom on coarse mesh to neighbor GPU. 
Only transfering 5 distbs for(int n = 0; n<GPU_N; n++) cudaMemcpyPeerAsync(&h_temp[n][pitch_e*YDIM*14],n,&g_d[ (n+1)%GPU_N][A][pitch_e*YDIM*14], (n+1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]); for(int n = 0; n<GPU_N; n++) cudaMemcpyPeerAsync(&g_temp[n][pitch_e*YDIM*9],n,&h_d[abs(n-1)%GPU_N][A][pitch_e*YDIM*9],abs(n-1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]); //compute inner nodes on coarse mesh for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); update_inn<<<grid,threads,0,stream_inner[n]>>>(f_d[n][B],f_d[n][A],g_d[n][A], h_d[n][A],omega,pitch_e,n,zInner,velAv_d[n][0],velAv_d[n][1],velFluc_d[n][0],velFluc_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),f_interp[n],pitchInterp_e); } //synchronize halo stream before computing top and bottom nodes for(int n = 0; n<GPU_N; n++) cudaStreamSynchronize(stream_halo[n]); //compute top and bottom nodes for(int n = 0; n<GPU_N; n++) { cudaSetDevice(n); update_top<<<g_grid, threads, 0, stream_halo [n]>>>(h_d[n][B],h_d[n][A],f_d[n][A],h_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),h_interp[n],pitchInterp_e); update_bot<<<g_grid, threads, 0, stream_halo [n]>>>(g_d[n][B],g_d[n][A],f_d[n][A],g_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),g_interp[n],pitchInterp_e); } //cudaDeviceSynchronize(); swap(A,B); if(REFINEMENT == 1){ int flag_F = 0; for(int i = 0; i<LRLEVEL; i++){ if(t>STARTF && i == 0) flag_F = 1; else flag_F = 0; for(int n = 0; n<GPU_N; n++){ cudaMemcpyPeerAsync(&h_LR_temp[n][pitchLR_e*YLRDIM*14],n,&g_LR_d[ (n+1)%GPU_N][C][pitchLR_e*YLRDIM*14], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]); cudaMemcpyPeerAsync(&g_LR_temp[n][pitchLR_e*YLRDIM*9 ],n,&h_LR_d[abs(n-1)%GPU_N][C][pitchLR_e*YLRDIM*9 ],abs(n-1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]); } for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); update_inn_LR<<<LR_grid,LR_threads,0,stream_inner[n]>>>(f_LR_d[n][D],f_LR_d[n][C],g_LR_d[n][C], h_LR_d[n][C],omegaLR,pitchLR_e,n,zLRInner,velAv_LR_d[n][0],velAv_LR_d[n][1],velFluc_LR_d[n][0],velFluc_LR_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F); } for(int n = 0; n<GPU_N; n++) cudaStreamSynchronize(stream_halo[n]); for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); update_top_LR<<<g_LR_grid,LR_threads,0,stream_halo[n]>>>(h_LR_d[n][D],h_LR_d[n][C],f_LR_d[n][C],h_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F); update_bot_LR<<<g_LR_grid,LR_threads,0,stream_halo[n]>>>(g_LR_d[n][D],g_LR_d[n][C],f_LR_d[n][C],g_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F); } if(i == LRLEVEL-1) { for(int n = 0; n<GPU_N; n++) //cudaMemcpyPeerAsync(&h_interp_temp[n][0],n,&g_interp[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]); for(int n = 0; n<GPU_N; n++) cudaMemcpyPeerAsync(&g_interp_temp[n][0],n,&h_interp[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]); } for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); cudaDeviceSynchronize(); } flag_F = 0; swap(C,D); } //interp from coarse grid for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); InterpCF<<<Interp_grid,Interp_threads,0,stream_inner[n]>>>(f_LR_d[n][C],g_LR_d[n][C],h_LR_d[n][C],pitchLR_e,f_interp[n],g_interp[n],h_interp[n],g_interp_temp[n],pitchInterp_e,SF_cf,omega,n,zInner,zLRInner); //cudaDeviceSynchronize(); } //interp from fine grid for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); 
cudaMemcpyPeerAsync(&h_LR_temp[n][0],n,&g_LR_d[ (n+1)%GPU_N][C][0], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*19,stream_halo[n]); } for(int n = 0; n<GPU_N; n++) cudaStreamSynchronize(stream_halo[n]); for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); InterpFC<<<Interp_grid_c,threads,0,stream_halo[n]>>>(f_d[n][A],g_d[n][A],h_d[n][A],f_LR_d[n][C],h_LR_d[n][C],h_LR_temp[n],pitch_e,pitchLR_e,SF_fc,omegaLR,n,zInner,zLRInner); } }//end refinement for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); cudaDeviceSynchronize(); } }//end time loop cudaDeviceSynchronize(); gettimeofday (&tdr1,NULL); timeval_subtract (&restime, &tdr1, &tdr0); int Nodes; Nodes = XDIM*YDIM*ZDIM; if (REFINEMENT == 1) Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL; cout<<"Time taken for main kernel: "<<restime<<" (" <<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n"; //D2H Memcpy and write results for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); cudaMemcpy2D(f_h[n],XDIM*sizeof(float),f_d[n][A],pitch,XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyDeviceToHost); cudaMemcpy2D(g_h[n],XDIM*sizeof(float),g_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,cudaMemcpyDeviceToHost); cudaMemcpy2D(h_h[n],XDIM*sizeof(float),h_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,cudaMemcpyDeviceToHost); for(int i = 0; i<3; i++){ cudaMemcpy2D( velAv_h[n][i],XDIM*sizeof(float),velAv_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost); cudaMemcpy2D(velFluc_h[n][i],XDIM*sizeof(float),velFluc_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost); cudaMemcpy(F_h[n][i],F_d[n][i],sizeof(float)*ForceTime,cudaMemcpyDeviceToHost); } WriteResults(output,f_h[n],g_h[n],h_h[n],velAv_h[n],velFluc_h[n],omega,GPU_N,n); output<<endl; for(int i=0;i<3;i++) for(int j=0;j<ForceTime;j++) F_total[i][j] += F_h[n][i][j]; for(int i = 0; i<2; i++){ cudaFree(f_d[n][i]); cudaFree(g_d[n][i]); cudaFree(h_d[n][i]); } cudaFree(f_d[n]); cudaFree(g_d[n]); cudaFree(h_d[n]); cudaFree(g_temp[n]); cudaFree(h_temp[n]); for(int i=0;i<3;i++) cudaFree(F_d[n][i]); cudaFree(F_d[n]); }//end Memcpy and write results WriteForces(F_total,outputForce,ForceTime,REFINEMENT*LRLEVEL); if(REFINEMENT == 1){ // output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n"; // output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM<<"\n"; for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); cudaMemcpy2D(f_LR_h[n],XLRDIM*sizeof(float),f_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyDeviceToHost); cudaMemcpy2D(g_LR_h[n],XLRDIM*sizeof(float),g_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyDeviceToHost); cudaMemcpy2D(h_LR_h[n],XLRDIM*sizeof(float),h_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyDeviceToHost); //cudaMemcpy2D(interp_h[n],(XLRDIM*LRFACTOR+1)*sizeof(float),f_interp[n],pitchInterp,(XLRDIM*LRFACTOR+1)*sizeof(float),(YLRDIM*LRFACTOR+1)*zInner*9,cudaMemcpyDeviceToHost); for(int i = 0; i<3; i++){ cudaMemcpy2D( velAv_LR_h[n][i],XLRDIM*sizeof(float),velAv_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost); cudaMemcpy2D(velFluc_LR_h[n][i],XLRDIM*sizeof(float),velFluc_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost); } WriteResultsLR(output,f_LR_h[n],g_LR_h[n],h_LR_h[n],velAv_LR_h[n],velFluc_LR_h[n],omegaLR,GPU_N,n); output<<endl; for(int i = 0; i<2; i++){ cudaFree(f_LR_d[n][i]); cudaFree(g_LR_d[n][i]); cudaFree(h_LR_d[n][i]); } cudaFree(f_LR_d[n]); cudaFree(g_LR_d[n]); cudaFree(h_LR_d[n]); cudaFree(g_LR_temp[n]); 
cudaFree(h_LR_temp[n]); } } return 0; }
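The coarse/fine coupling in the code above hinges on the relaxation rates omega and omegaLR and the scale factors SF_cf/SF_fc computed near the top of main(). As a standalone reminder of that arithmetic, here is a minimal host-only sketch using the same formulas; the numeric constants are placeholders for the values defined in the project's headers, which are not part of this record.

#include <cstdio>

// Placeholder values; the real RE, UMAX, OBSTR1 and LRFACTOR come from the
// project's headers and are not shown in this record.
static const float RE       = 100.0f;
static const float UMAX     = 0.08f;
static const float OBSTR1   = 10.0f;
static const float LRFACTOR = 0.5f;

int main()
{
    float CharLength = OBSTR1 * 2.f;
    // BGK relaxation rate on the coarse grid, from nu = UMAX*CharLength/RE.
    float omega = 1.0f / (3.0f * (UMAX * CharLength / RE) + 0.5f);
    // Relaxation rate on a grid refined by a factor of 2. The file applies this
    // map once more for LRFACTOR == 0.25f and twice more for 0.125f.
    float omegaLR = 2.0f / (1.0f + 2.0f * (2.0f / omega - 1.0f));
    // Scale factors for the non-equilibrium parts when interpolating
    // coarse -> fine (SF_cf) and fine -> coarse (SF_fc).
    float SF_cf = omega * (1.0f - omegaLR) / ((1.0f - omega) * omegaLR / LRFACTOR);
    float SF_fc = 1.f / SF_cf;
    printf("omega = %f, omegaLR = %f, SF_cf = %f, SF_fc = %f\n",
           omega, omegaLR, SF_cf, SF_fc);
    return 0;
}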
20b069e83a519ceef92197d725ca135e8bb2ae06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <stdio.h> #include "matrixmul.h" #include "tiledMatMult.h" //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// // Matrix multiplication kernel thread specification __global__ void MatrixMulKernel(const Matrix M, const Matrix N, Matrix P) { __shared__ float s_M[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float s_N[BLOCK_SIZE][BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int row = ty + by * BLOCK_SIZE; int col = tx + bx * BLOCK_SIZE; float temp = 0; // load data into shared memory for (int i=0; i<(M.width / BLOCK_SIZE); i++) { s_M[ty][tx] = M.elements[row * M.width + i * BLOCK_SIZE + tx]; s_N[ty][tx] = N.elements[(i * BLOCK_SIZE + ty) * N.width + col]; __syncthreads(); for (int k=0; k<BLOCK_SIZE; k++) { temp += s_M[ty][k] * s_N[k][tx]; __syncthreads(); } } P.elements[row * P.width + col] = temp; } #endif // #ifndef _MATRIXMUL_KERNEL_H_
20b069e83a519ceef92197d725ca135e8bb2ae06.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <stdio.h> #include "matrixmul.h" #include "tiledMatMult.h" //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// // Matrix multiplication kernel – thread specification __global__ void MatrixMulKernel(const Matrix M, const Matrix N, Matrix P) { __shared__ float s_M[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float s_N[BLOCK_SIZE][BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int row = ty + by * BLOCK_SIZE; int col = tx + bx * BLOCK_SIZE; float temp = 0; // load data into shared memory for (int i=0; i<(M.width / BLOCK_SIZE); i++) { s_M[ty][tx] = M.elements[row * M.width + i * BLOCK_SIZE + tx]; s_N[ty][tx] = N.elements[(i * BLOCK_SIZE + ty) * N.width + col]; __syncthreads(); for (int k=0; k<BLOCK_SIZE; k++) { temp += s_M[ty][k] * s_N[k][tx]; __syncthreads(); } } P.elements[row * P.width + col] = temp; } #endif // #ifndef _MATRIXMUL_KERNEL_H_
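A possible host-side driver for the tiled kernel in this pair, shown only as a sketch: BLOCK_SIZE and the Matrix layout (width, height, elements) are guesses at what tiledMatMult.h and matrixmul.h declare, and in a real build you would include those headers instead of the stand-ins below and compile this together with the kernel file above.

#include <cuda_runtime.h>

#define BLOCK_SIZE 16  // assumed; the real value comes from tiledMatMult.h

// Assumed stand-in for the Matrix type declared in matrixmul.h.
struct Matrix { int width; int height; float *elements; };

// Kernel defined in the file above; only declared here.
__global__ void MatrixMulKernel(const Matrix M, const Matrix N, Matrix P);

// One thread per element of P. The kernel has no boundary guards, so every
// dimension must be an exact multiple of BLOCK_SIZE.
void LaunchTiledMatMul(const Matrix &dM, const Matrix &dN, Matrix &dP)
{
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(dP.width / BLOCK_SIZE, dP.height / BLOCK_SIZE);
    MatrixMulKernel<<<grid, block>>>(dM, dN, dP);
    cudaDeviceSynchronize();
}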
6c63dbf32073f86c524ecf12a436c979161b682d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> // The __global__ keyword in CUDA C/C++ marks a function that // runs on the device and is called from host code. // This is the device code, processed by the NVIDIA compiler (nvcc). __global__ void mykernel(void) { } // Host functions are processed by a standard host compiler, e.g. GCC or Visual Studio (with Nsight). int main() { // Launch the kernel from host code to execute a function on the GPU! // We'll return to the parameters (1,1) in a moment hipLaunchKernelGGL(( mykernel) , dim3(1), dim3(1) , 0, 0, ); printf("Hello, CUDA!\n"); return 0; }
6c63dbf32073f86c524ecf12a436c979161b682d.cu
#include "cuda_runtime.h" #include <stdio.h> // __global_- keyword in CUDA C/C++ indicates a function that // it run on the devices and it is called from host code. // This is the device components processed by NVIDIA compiler(nvcc). __global__ void mykernel(void) { } // Host functions processed by standard host compiler. ex) GCC, VS including Nsight. int main() { // Launch kernel from host code to device code for executing a function on the GPU! // We'll return to the parameters (1,1) in a moment mykernel <<<1, 1 >>>(); printf("Hello, CUDA!\n"); return 0; }
9586ade9cb8cfef12681f67afd4fb796c826ff08.hip
// !!! This is a file automatically generated by hipify!!! #include <LibFkpsConfig.h> #include <LibFkps.h> #include "../LibFkps.hh" #include <hip/hip_runtime.h> #include <hip/hiprtc.h> #include <fstream> #include <string> #include <sstream> LibFkpsErr_t LibFkpsCompile(FKPS lib) { LibFkps_t* _lib = (LibFkps_t*)lib; std::ifstream fileStream; std::string cFileContent; hiprtcProgram prog = nullptr; FILE* libFile = nullptr; char* ptx = nullptr; size_t ptxSize; libFile = fopen(_lib->libFileName->c_str(), "w"); if (!libFile) goto LIB_FAILED; fileStream = std::ifstream(_lib->cFileName->c_str()); if (!fileStream.is_open()) goto LIB_FAILED; cFileContent = std::string((std::istreambuf_iterator<char>(fileStream)), (std::istreambuf_iterator<char>())); hiprtcCreateProgram(&prog, cFileContent.c_str(), _lib->libFileName->c_str(), 0, nullptr, nullptr); if (hiprtcCompileProgram(prog, 0, nullptr) != HIPRTC_SUCCESS) goto LIB_FAILED; hiprtcGetCodeSize(prog, &ptxSize); ptx = new char[ptxSize]; hiprtcGetCode(prog, ptx); hiprtcDestroyProgram(&prog); fprintf(libFile, ptx); fclose(libFile); delete[] ptx; hiprtcDestroyProgram(&prog); fileStream.close(); return LIBFKPS_ERR_SUCCESS; LIB_FAILED: if (!libFile) return LIBFKPS_ERR_LIB_NOT_FOUND; fclose(libFile); if (fileStream.is_open()) fileStream.close(); else return LIBFKPS_ERR_SRC_NOT_FOUND; if (prog) hiprtcDestroyProgram(&prog); if (ptx) delete[] ptx; return LIBFKPS_ERR_COMPILE; }
9586ade9cb8cfef12681f67afd4fb796c826ff08.cu
#include <LibFkpsConfig.h> #include <LibFkps.h> #include "../LibFkps.hh" #include <cuda_runtime.h> #include <nvrtc.h> #include <fstream> #include <string> #include <sstream> LibFkpsErr_t LibFkpsCompile(FKPS lib) { LibFkps_t* _lib = (LibFkps_t*)lib; std::ifstream fileStream; std::string cFileContent; nvrtcProgram prog = nullptr; FILE* libFile = nullptr; char* ptx = nullptr; size_t ptxSize; libFile = fopen(_lib->libFileName->c_str(), "w"); if (!libFile) goto LIB_FAILED; fileStream = std::ifstream(_lib->cFileName->c_str()); if (!fileStream.is_open()) goto LIB_FAILED; cFileContent = std::string((std::istreambuf_iterator<char>(fileStream)), (std::istreambuf_iterator<char>())); nvrtcCreateProgram(&prog, cFileContent.c_str(), _lib->libFileName->c_str(), 0, nullptr, nullptr); if (nvrtcCompileProgram(prog, 0, nullptr) != NVRTC_SUCCESS) goto LIB_FAILED; nvrtcGetPTXSize(prog, &ptxSize); ptx = new char[ptxSize]; nvrtcGetPTX(prog, ptx); nvrtcDestroyProgram(&prog); fprintf(libFile, ptx); fclose(libFile); delete[] ptx; nvrtcDestroyProgram(&prog); fileStream.close(); return LIBFKPS_ERR_SUCCESS; LIB_FAILED: if (!libFile) return LIBFKPS_ERR_LIB_NOT_FOUND; fclose(libFile); if (fileStream.is_open()) fileStream.close(); else return LIBFKPS_ERR_SRC_NOT_FOUND; if (prog) nvrtcDestroyProgram(&prog); if (ptx) delete[] ptx; return LIBFKPS_ERR_COMPILE; }
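One detail worth flagging in both versions above: fprintf(libFile, ptx) passes the generated PTX as the format string, so any '%' in it would be misread, and the program handle is destroyed twice on the success path. The sketch below keeps the same compile-to-PTX flow with those two points tightened; it relies only on the public NVRTC API and is an illustration, not a drop-in replacement for LibFkpsCompile.

#include <nvrtc.h>
#include <cstdio>
#include <string>
#include <vector>

// Compile a CUDA C source string to PTX and write it to `path`.
// Returns false on any NVRTC or I/O failure.
static bool CompileToPtx(const std::string &src, const char *name, const char *path)
{
    nvrtcProgram prog = nullptr;
    if (nvrtcCreateProgram(&prog, src.c_str(), name, 0, nullptr, nullptr) != NVRTC_SUCCESS)
        return false;

    bool ok = (nvrtcCompileProgram(prog, 0, nullptr) == NVRTC_SUCCESS);
    if (ok) {
        size_t ptxSize = 0;
        nvrtcGetPTXSize(prog, &ptxSize);          // size includes the trailing NUL
        std::vector<char> ptx(ptxSize);
        nvrtcGetPTX(prog, ptx.data());

        FILE *out = fopen(path, "w");
        if (out) {
            // fwrite, unlike fprintf(out, ptx), never interprets the PTX contents.
            fwrite(ptx.data(), 1, ptxSize ? ptxSize - 1 : 0, out);
            fclose(out);
        } else {
            ok = false;
        }
    }
    nvrtcDestroyProgram(&prog);                   // destroyed exactly once, on every path
    return ok;
}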
3c4a7d34ac2cf18c4bbdb329a0ff657c2ef69621.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include<time.h> #define T 1024 // max threads per block __global__ void vecAdd(int *a,int *b,int *c,int N); int main(){ int N=2048; int curr=2; printf("-----------------------------------------\n"); while(N<=T*13){ int a[N], b[N], gpu_add[N],cpu_add[N]; int *dev_a,*dev_b,*dev_c; float time_gpu,time_cpu,timeindex,timeinit; for(int i=0;i<N;i++){ a[i]=i+i; b[i]=i*i; } int size=N*sizeof(int); hipMalloc((void**) &dev_a,size); hipMalloc((void**) &dev_b,size); hipMalloc((void**) &dev_c,size); hipEvent_t startinit,endinit; hipEventCreate(&startinit); hipEventCreate(&endinit); hipEventRecord(startinit, 0); hipMemcpy(dev_a,a,size,hipMemcpyHostToDevice); hipMemcpy(dev_b,b,size,hipMemcpyHostToDevice); hipEventRecord(endinit, 0); hipEventSynchronize(endinit); hipEventElapsedTime(&timeinit, startinit, endinit); hipEvent_t gpu_start,gpu_end; hipEventCreate(&gpu_start); hipEventCreate(&gpu_end); hipEventRecord(gpu_start, 0); hipLaunchKernelGGL(( vecAdd), dim3((int)(N+T)/T),dim3(T), 0, 0, dev_a,dev_b,dev_c,N); hipDeviceSynchronize(); hipEventRecord(gpu_end, 0); hipEventSynchronize(gpu_end); hipEventElapsedTime(&time_gpu, gpu_start, gpu_end); hipEvent_t startindex,endindex; hipEventCreate(&startindex); hipEventCreate(&endindex); hipEventRecord(startindex, 0); hipMemcpy(gpu_add,dev_c,size,hipMemcpyDeviceToHost); hipEventRecord(endindex, 0); hipEventSynchronize(endindex); hipEventElapsedTime(&timeindex, startindex, endindex); clock_t cpu_start,cpu_end; cpu_start=clock(); for(int i=0;i<N;i++){ cpu_add[i]=a[i]+b[i]; } cpu_end=clock(); timeinit/=1000; timeindex/=1000; time_gpu/=1000; time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC); printf("Time for sending initial data from host to device : %f\t sec\n",timeinit); printf("Cuda program launched with %d block and %d threads\n",(int)(N+T)/T,T); printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex); printf("GPU Time:%f seconds\n",time_gpu); printf("CPU Time:%f seconds\n",time_cpu); int flag=1; for(int i=0;i<N;i++){ //aprintf("%d - %d - %d\n",gpu_add[i],cpu_add[i],i); if(gpu_add[i]!=cpu_add[i]){ flag=0; break; } } if(flag){ printf("TEST PASSED\n"); printf("SPEED UP:%f\n",time_cpu/time_gpu); } else{ printf("TEST FAILED\n"); } hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); printf("---------------------------------------------------------\n"); curr++; N=T*curr; } exit(0); } __global__ void vecAdd(int *a,int *b,int *c,int N){ int i=blockIdx.x*blockDim.x+threadIdx.x; if(i<N){ c[i]=a[i]+b[i]; } }
3c4a7d34ac2cf18c4bbdb329a0ff657c2ef69621.cu
#include<stdio.h> #include<stdlib.h> #include<time.h> #define T 1024 // max threads per block __global__ void vecAdd(int *a,int *b,int *c,int N); int main(){ int N=2048; int curr=2; printf("-----------------------------------------\n"); while(N<=T*13){ int a[N], b[N], gpu_add[N],cpu_add[N]; int *dev_a,*dev_b,*dev_c; float time_gpu,time_cpu,timeindex,timeinit; for(int i=0;i<N;i++){ a[i]=i+i; b[i]=i*i; } int size=N*sizeof(int); cudaMalloc((void**) &dev_a,size); cudaMalloc((void**) &dev_b,size); cudaMalloc((void**) &dev_c,size); cudaEvent_t startinit,endinit; cudaEventCreate(&startinit); cudaEventCreate(&endinit); cudaEventRecord(startinit, 0); cudaMemcpy(dev_a,a,size,cudaMemcpyHostToDevice); cudaMemcpy(dev_b,b,size,cudaMemcpyHostToDevice); cudaEventRecord(endinit, 0); cudaEventSynchronize(endinit); cudaEventElapsedTime(&timeinit, startinit, endinit); cudaEvent_t gpu_start,gpu_end; cudaEventCreate(&gpu_start); cudaEventCreate(&gpu_end); cudaEventRecord(gpu_start, 0); vecAdd<<<(int)(N+T)/T,T>>>(dev_a,dev_b,dev_c,N); cudaDeviceSynchronize(); cudaEventRecord(gpu_end, 0); cudaEventSynchronize(gpu_end); cudaEventElapsedTime(&time_gpu, gpu_start, gpu_end); cudaEvent_t startindex,endindex; cudaEventCreate(&startindex); cudaEventCreate(&endindex); cudaEventRecord(startindex, 0); cudaMemcpy(gpu_add,dev_c,size,cudaMemcpyDeviceToHost); cudaEventRecord(endindex, 0); cudaEventSynchronize(endindex); cudaEventElapsedTime(&timeindex, startindex, endindex); clock_t cpu_start,cpu_end; cpu_start=clock(); for(int i=0;i<N;i++){ cpu_add[i]=a[i]+b[i]; } cpu_end=clock(); timeinit/=1000; timeindex/=1000; time_gpu/=1000; time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC); printf("Time for sending initial data from host to device : %f\t sec\n",timeinit); printf("Cuda program launched with %d block and %d threads\n",(int)(N+T)/T,T); printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex); printf("GPU Time:%f seconds\n",time_gpu); printf("CPU Time:%f seconds\n",time_cpu); int flag=1; for(int i=0;i<N;i++){ //aprintf("%d - %d - %d\n",gpu_add[i],cpu_add[i],i); if(gpu_add[i]!=cpu_add[i]){ flag=0; break; } } if(flag){ printf("TEST PASSED\n"); printf("SPEED UP:%f\n",time_cpu/time_gpu); } else{ printf("TEST FAILED\n"); } cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); printf("---------------------------------------------------------\n"); curr++; N=T*curr; } exit(0); } __global__ void vecAdd(int *a,int *b,int *c,int N){ int i=blockIdx.x*blockDim.x+threadIdx.x; if(i<N){ c[i]=a[i]+b[i]; } }
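The cudaEvent_t pattern used throughout this pair (record, launch, record, synchronize, elapsed time) is the standard way to time kernels; a compact reusable version is sketched below. It also uses the usual ceiling division (N + T - 1) / T for the grid size, whereas the file's (int)(N+T)/T launches one extra, idle block whenever N is an exact multiple of T; the i < N guard makes that harmless, just slightly wasteful.

#include <cuda_runtime.h>

__global__ void vecAdd(int *a, int *b, int *c, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) c[i] = a[i] + b[i];
}

// Time a single vecAdd launch and return the elapsed time in seconds,
// matching the units printed by the program above.
float TimeVecAdd(int *dev_a, int *dev_b, int *dev_c, int N, int threadsPerBlock)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;  // ceiling division
    cudaEventRecord(start, 0);
    vecAdd<<<blocks, threadsPerBlock>>>(dev_a, dev_b, dev_c, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms / 1000.f;
}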
2cf6e4f52b11d57b4af2448886b0e89e144050f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //xfail:REPAIR_ERROR //--blockDim=2048 --gridDim=64 struct s { float *p[42]; }; __global__ void foo(s q) { __requires_fresh_array(q.p[4]); q.p[4][0] = threadIdx.x; }
2cf6e4f52b11d57b4af2448886b0e89e144050f5.cu
//xfail:REPAIR_ERROR //--blockDim=2048 --gridDim=64 struct s { float *p[42]; }; __global__ void foo(s q) { __requires_fresh_array(q.p[4]); q.p[4][0] = threadIdx.x; }
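This entry is a GPUVerify regression test rather than a runnable program: the //--blockDim=2048 --gridDim=64 line is the tool's launch directive, __requires_fresh_array is a verifier annotation, and the expected failure comes from every thread writing the same element q.p[4][0]. For reference, a plain CUDA rendering of the same scenario is sketched below; the directive's 2048 threads per block exceeds the 1024-thread hardware limit, so the sketch uses 1024 and keeps the race.

#include <cuda_runtime.h>

struct s { float *p[42]; };

// Same kernel shape without the verifier annotation. Every thread writes
// q.p[4][0], which is the write-write race GPUVerify reports.
__global__ void foo(s q) { q.p[4][0] = threadIdx.x; }

int main()
{
    s q = {};
    cudaMalloc((void **)&q.p[4], sizeof(float));
    foo<<<64, 1024>>>(q);     // directive asks for 2048 threads/block; 1024 is the cap on current GPUs
    cudaDeviceSynchronize();
    cudaFree(q.p[4]);
    return 0;
}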
613c7b175e406dfd3388a968cbae995b8bcd5522.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> // #include <THH/THHAtomics.cuh> #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int max_block_num = 65000; return min(optimal_block_num, max_block_num); } template <typename scalar_t> __global__ void MaskedIm2colForward(const int n, const scalar_t *data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const long *mask_h_idx, const long *mask_w_idx, const int mask_cnt, scalar_t *data_col) { // mask_cnt * channels CUDA_1D_KERNEL_LOOP(index, n) { const int m_index = index % mask_cnt; const int h_col = mask_h_idx[m_index]; const int w_col = mask_w_idx[m_index]; const int c_im = index / mask_cnt; const int c_col = c_im * kernel_h * kernel_w; const int h_offset = h_col - pad_h; const int w_offset = w_col - pad_w; scalar_t *data_col_ptr = data_col + c_col * mask_cnt + m_index; for (int i = 0; i < kernel_h; ++i) { int h_im = h_offset + i; for (int j = 0; j < kernel_w; ++j) { int w_im = w_offset + j; if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { *data_col_ptr = (scalar_t)data_im[(c_im * height + h_im) * width + w_im]; } else { *data_col_ptr = 0.0; } data_col_ptr += mask_cnt; } } } } int MaskedIm2colForwardLaucher(const at::Tensor bottom_data, const int height, const int width, const int channels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const at::Tensor mask_h_idx, const at::Tensor mask_w_idx, const int mask_cnt, at::Tensor top_data) { const int output_size = mask_cnt * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF(bottom_data.type(), "MaskedIm2colLaucherForward", ([&] { const scalar_t *bottom_data_ = bottom_data.data<scalar_t>(); const long *mask_h_idx_ = mask_h_idx.data<long>(); const long *mask_w_idx_ = mask_w_idx.data<long>(); scalar_t *top_data_ = top_data.data<scalar_t>(); hipLaunchKernelGGL(( MaskedIm2colForward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, bottom_data_, height, width, kernel_h, kernel_w, pad_h, pad_w, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_); })); AT_CUDA_CHECK(hipGetLastError()); return 1; } template <typename scalar_t> __global__ void MaskedCol2imForward(const int n, const scalar_t *data_col, const int height, const int width, const int channels, const long *mask_h_idx, const long *mask_w_idx, const int mask_cnt, scalar_t *data_im) { CUDA_1D_KERNEL_LOOP(index, n) { const int m_index = index % mask_cnt; const int h_im = mask_h_idx[m_index]; const int w_im = mask_w_idx[m_index]; const int c_im = index / mask_cnt; // int kernel_extent_w = (kernel_w - 1) + 1; // int kernel_extent_h = (kernel_h - 1) + 1; // compute the start and end of the output data_im[(c_im * height + h_im) * width + w_im] = data_col[index]; } } int MaskedCol2imForwardLaucher(const at::Tensor bottom_data, const int height, const int width, const int channels, const at::Tensor mask_h_idx, const at::Tensor mask_w_idx, const int mask_cnt, at::Tensor top_data) { const int output_size = mask_cnt * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( bottom_data.type(), "MaskedCol2imLaucherForward", ([&] { const scalar_t *bottom_data_ = bottom_data.data<scalar_t>(); const long *mask_h_idx_ = 
mask_h_idx.data<long>(); const long *mask_w_idx_ = mask_w_idx.data<long>(); scalar_t *top_data_ = top_data.data<scalar_t>(); hipLaunchKernelGGL(( MaskedCol2imForward<scalar_t>), dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, bottom_data_, height, width, channels, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_); })); AT_CUDA_CHECK(hipGetLastError()); return 1; }
613c7b175e406dfd3388a968cbae995b8bcd5522.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> // #include <THC/THCAtomics.cuh> #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int max_block_num = 65000; return min(optimal_block_num, max_block_num); } template <typename scalar_t> __global__ void MaskedIm2colForward(const int n, const scalar_t *data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const long *mask_h_idx, const long *mask_w_idx, const int mask_cnt, scalar_t *data_col) { // mask_cnt * channels CUDA_1D_KERNEL_LOOP(index, n) { const int m_index = index % mask_cnt; const int h_col = mask_h_idx[m_index]; const int w_col = mask_w_idx[m_index]; const int c_im = index / mask_cnt; const int c_col = c_im * kernel_h * kernel_w; const int h_offset = h_col - pad_h; const int w_offset = w_col - pad_w; scalar_t *data_col_ptr = data_col + c_col * mask_cnt + m_index; for (int i = 0; i < kernel_h; ++i) { int h_im = h_offset + i; for (int j = 0; j < kernel_w; ++j) { int w_im = w_offset + j; if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { *data_col_ptr = (scalar_t)data_im[(c_im * height + h_im) * width + w_im]; } else { *data_col_ptr = 0.0; } data_col_ptr += mask_cnt; } } } } int MaskedIm2colForwardLaucher(const at::Tensor bottom_data, const int height, const int width, const int channels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const at::Tensor mask_h_idx, const at::Tensor mask_w_idx, const int mask_cnt, at::Tensor top_data) { const int output_size = mask_cnt * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF(bottom_data.type(), "MaskedIm2colLaucherForward", ([&] { const scalar_t *bottom_data_ = bottom_data.data<scalar_t>(); const long *mask_h_idx_ = mask_h_idx.data<long>(); const long *mask_w_idx_ = mask_w_idx.data<long>(); scalar_t *top_data_ = top_data.data<scalar_t>(); MaskedIm2colForward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, bottom_data_, height, width, kernel_h, kernel_w, pad_h, pad_w, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_); })); AT_CUDA_CHECK(cudaGetLastError()); return 1; } template <typename scalar_t> __global__ void MaskedCol2imForward(const int n, const scalar_t *data_col, const int height, const int width, const int channels, const long *mask_h_idx, const long *mask_w_idx, const int mask_cnt, scalar_t *data_im) { CUDA_1D_KERNEL_LOOP(index, n) { const int m_index = index % mask_cnt; const int h_im = mask_h_idx[m_index]; const int w_im = mask_w_idx[m_index]; const int c_im = index / mask_cnt; // int kernel_extent_w = (kernel_w - 1) + 1; // int kernel_extent_h = (kernel_h - 1) + 1; // compute the start and end of the output data_im[(c_im * height + h_im) * width + w_im] = data_col[index]; } } int MaskedCol2imForwardLaucher(const at::Tensor bottom_data, const int height, const int width, const int channels, const at::Tensor mask_h_idx, const at::Tensor mask_w_idx, const int mask_cnt, at::Tensor top_data) { const int output_size = mask_cnt * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( bottom_data.type(), "MaskedCol2imLaucherForward", ([&] { const scalar_t *bottom_data_ = bottom_data.data<scalar_t>(); const long *mask_h_idx_ = mask_h_idx.data<long>(); const long *mask_w_idx_ = mask_w_idx.data<long>(); scalar_t *top_data_ = top_data.data<scalar_t>(); 
MaskedCol2imForward<scalar_t><<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, bottom_data_, height, width, channels, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_); })); AT_CUDA_CHECK(cudaGetLastError()); return 1; }
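The kernels above flatten a CHW image into a (C*kh*kw) x mask_cnt column buffer, taking one kh x kw patch per masked location. When testing the CUDA path it helps to have the same indexing on the host; the reference below mirrors MaskedIm2colForward's layout exactly and is written for this note, not taken from the repository.

#include <vector>

// Host-side reference of MaskedIm2colForward. data_im is CHW,
// data_col is (channels*kernel_h*kernel_w) x mask_cnt, matching the kernel.
void MaskedIm2colRef(const std::vector<float> &data_im,
                     int channels, int height, int width,
                     int kernel_h, int kernel_w, int pad_h, int pad_w,
                     const std::vector<long> &mask_h_idx,
                     const std::vector<long> &mask_w_idx,
                     std::vector<float> &data_col)
{
    int mask_cnt = (int)mask_h_idx.size();
    data_col.assign((size_t)channels * kernel_h * kernel_w * mask_cnt, 0.f);
    for (int c = 0; c < channels; ++c)
        for (int m = 0; m < mask_cnt; ++m) {
            int h_offset = (int)mask_h_idx[m] - pad_h;
            int w_offset = (int)mask_w_idx[m] - pad_w;
            for (int i = 0; i < kernel_h; ++i)
                for (int j = 0; j < kernel_w; ++j) {
                    int h_im = h_offset + i;
                    int w_im = w_offset + j;
                    float v = 0.f;
                    if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width)
                        v = data_im[((size_t)c * height + h_im) * width + w_im];
                    int c_col = c * kernel_h * kernel_w + i * kernel_w + j;
                    data_col[(size_t)c_col * mask_cnt + m] = v;
                }
        }
}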
0552743d0e1086853a96181985ed9bb12eee4d02.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/zlascl2.cu, normal z -> d, Mon Jun 25 18:24:12 2018 @author Theo Mary */ #include "magma_internal.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right. __global__ void dlascl2_full(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for (int j=0; j < n; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void dlascl2_lower(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; int break_d = (ind < n) ? ind : n-1; double mul = D[ind]; A += ind; if (ind < m) { for (int j=0; j <= break_d; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void dlascl2_upper(int m, int n, const double *D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for (int j=n-1; j >= ind; j--) A[j*lda] *= mul; } } /***************************************************************************//** Purpose ------- DLASCL2 scales the M by N real matrix A by the real diagonal matrix dD. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- @param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in] dD DOUBLE PRECISION vector, dimension (M) The diagonal matrix containing the scalar factors. Stored as a vector. @param[in,out] dA DOUBLE PRECISION array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. 
@see magma_dlascl_diag @ingroup magma_lascl_diag *******************************************************************************/ extern "C" void magmablas_dlascl2( magma_type_t type, magma_int_t m, magma_int_t n, magmaDouble_const_ptr dD, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -5; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( magma_ceildiv( m, NB ) ); dim3 threads( NB ); if (type == MagmaLower) { hipLaunchKernelGGL(( dlascl2_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda); } else if (type == MagmaUpper) { hipLaunchKernelGGL(( dlascl2_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda); } else if (type == MagmaFull) { hipLaunchKernelGGL(( dlascl2_full) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda); } }
0552743d0e1086853a96181985ed9bb12eee4d02.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/zlascl2.cu, normal z -> d, Mon Jun 25 18:24:12 2018 @author Theo Mary */ #include "magma_internal.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right. __global__ void dlascl2_full(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for (int j=0; j < n; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void dlascl2_lower(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; int break_d = (ind < n) ? ind : n-1; double mul = D[ind]; A += ind; if (ind < m) { for (int j=0; j <= break_d; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void dlascl2_upper(int m, int n, const double *D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for (int j=n-1; j >= ind; j--) A[j*lda] *= mul; } } /***************************************************************************//** Purpose ------- DLASCL2 scales the M by N real matrix A by the real diagonal matrix dD. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- @param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in] dD DOUBLE PRECISION vector, dimension (M) The diagonal matrix containing the scalar factors. Stored as a vector. @param[in,out] dA DOUBLE PRECISION array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. @see magma_dlascl_diag @ingroup magma_lascl_diag *******************************************************************************/ extern "C" void magmablas_dlascl2( magma_type_t type, magma_int_t m, magma_int_t n, magmaDouble_const_ptr dD, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -5; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( magma_ceildiv( m, NB ) ); dim3 threads( NB ); if (type == MagmaLower) { dlascl2_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda); } else if (type == MagmaUpper) { dlascl2_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda); } else if (type == MagmaFull) { dlascl2_full <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda); } }
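A minimal call sketch for the routine documented above, assuming the standard MAGMA 2.x queue helpers (magma_queue_create, magma_queue_sync, magma_queue_destroy); dD and dA are taken to be device-resident already and error handling is left out.

#include "magma_v2.h"

// Scale each row i of the m x n device matrix dA by dD[i].
void scale_rows(magma_int_t m, magma_int_t n,
                magmaDouble_const_ptr dD,
                magmaDouble_ptr dA, magma_int_t ldda)
{
    magma_queue_t queue;
    magma_int_t dev = 0, info = 0;
    magma_queue_create(dev, &queue);

    // MagmaFull scales the whole matrix; MagmaLower/MagmaUpper restrict the
    // update to the corresponding triangle, as documented above.
    magmablas_dlascl2(MagmaFull, m, n, dD, dA, ldda, queue, &info);

    magma_queue_sync(queue);
    magma_queue_destroy(queue);
}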
863aa9e2bacfb25a86859c3c7888db5b73680971.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zgeadd_batched.cu, normal z -> d, Thu Oct 8 23:05:35 2020 @author Mark Gates */ #include "magma_internal.h" #define NB 64 /******************************************************************************/ /* Batches dlacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread adds one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. TODO. Block in both directions, for large matrices. E.g., each block does 64x64 tile, instead of 64xN tile. */ __global__ void dgeadd_batched_kernel( int m, int n, double alpha, const double * const *dAarray, int ldda, double **dBarray, int lddb ) { // dA and dB iterate across row i const double *dA = dAarray[ blockIdx.y ]; double *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const double *dAend = dA + n*ldda; while( dA < dAend ) { *dB = alpha*(*dA) + (*dB); dA += ldda; dB += lddb; } } } /***************************************************************************//** Purpose ------- ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i], for i = 0, ..., batchCount-1. Arguments --------- @param[in] m INTEGER The number of rows of each matrix dAarray[i]. M >= 0. @param[in] n INTEGER The number of columns of each matrix dAarray[i]. N >= 0. @param[in] alpha DOUBLE PRECISION The scalar alpha. @param[in] dAarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a DOUBLE PRECISION array, dimension (LDDA,N) The m by n matrices dAarray[i]. @param[in] ldda INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). @param[in,out] dBarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a DOUBLE PRECISION array, dimension (LDDB,N) The m by n matrices dBarray[i]. @param[in] lddb INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). @param[in] batchCount INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_geadd_batched *******************************************************************************/ extern "C" void magmablas_dgeadd_batched( magma_int_t m, magma_int_t n, double alpha, magmaDouble_const_ptr const dAarray[], magma_int_t ldda, magmaDouble_ptr dBarray[], magma_int_t lddb, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); magma_int_t max_batchCount = queue->get_maxBatch(); for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) { magma_int_t ibatch = min(max_batchCount, batchCount-i); dim3 grid( magma_ceildiv( m, NB ), ibatch ); hipLaunchKernelGGL(( dgeadd_batched_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, alpha, dAarray+i, ldda, dBarray+i, lddb ); } }
863aa9e2bacfb25a86859c3c7888db5b73680971.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zgeadd_batched.cu, normal z -> d, Thu Oct 8 23:05:35 2020 @author Mark Gates */ #include "magma_internal.h" #define NB 64 /******************************************************************************/ /* Batches dlacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread adds one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. TODO. Block in both directions, for large matrices. E.g., each block does 64x64 tile, instead of 64xN tile. */ __global__ void dgeadd_batched_kernel( int m, int n, double alpha, const double * const *dAarray, int ldda, double **dBarray, int lddb ) { // dA and dB iterate across row i const double *dA = dAarray[ blockIdx.y ]; double *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const double *dAend = dA + n*ldda; while( dA < dAend ) { *dB = alpha*(*dA) + (*dB); dA += ldda; dB += lddb; } } } /***************************************************************************//** Purpose ------- ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i], for i = 0, ..., batchCount-1. Arguments --------- @param[in] m INTEGER The number of rows of each matrix dAarray[i]. M >= 0. @param[in] n INTEGER The number of columns of each matrix dAarray[i]. N >= 0. @param[in] alpha DOUBLE PRECISION The scalar alpha. @param[in] dAarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a DOUBLE PRECISION array, dimension (LDDA,N) The m by n matrices dAarray[i]. @param[in] ldda INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). @param[in,out] dBarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a DOUBLE PRECISION array, dimension (LDDB,N) The m by n matrices dBarray[i]. @param[in] lddb INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). @param[in] batchCount INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_geadd_batched *******************************************************************************/ extern "C" void magmablas_dgeadd_batched( magma_int_t m, magma_int_t n, double alpha, magmaDouble_const_ptr const dAarray[], magma_int_t ldda, magmaDouble_ptr dBarray[], magma_int_t lddb, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); magma_int_t max_batchCount = queue->get_maxBatch(); for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) { magma_int_t ibatch = min(max_batchCount, batchCount-i); dim3 grid( magma_ceildiv( m, NB ), ibatch ); dgeadd_batched_kernel<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, alpha, dAarray+i, ldda, dBarray+i, lddb ); } }
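The batched interface takes arrays of device pointers that must themselves live in device memory, which is the only non-obvious part of calling it. The sketch below shows that setup, assuming the usual MAGMA helpers magma_malloc, magma_setvector and magma_free and a queue created elsewhere; hA_dev_ptrs and hB_dev_ptrs are host arrays whose entries already point at m x n device matrices.

#include "magma_v2.h"

// dB[i] += alpha * dA[i] for a batch of equally sized m x n matrices.
void batched_geadd(magma_int_t m, magma_int_t n, double alpha,
                   double **hA_dev_ptrs, magma_int_t ldda,
                   double **hB_dev_ptrs, magma_int_t lddb,
                   magma_int_t batchCount, magma_queue_t queue)
{
    double **dAarray = NULL, **dBarray = NULL;
    magma_malloc((void **)&dAarray, batchCount * sizeof(double *));
    magma_malloc((void **)&dBarray, batchCount * sizeof(double *));

    // Copy the host-side arrays of device pointers onto the GPU.
    magma_setvector(batchCount, sizeof(double *), hA_dev_ptrs, 1, dAarray, 1, queue);
    magma_setvector(batchCount, sizeof(double *), hB_dev_ptrs, 1, dBarray, 1, queue);

    magmablas_dgeadd_batched(m, n, alpha,
                             (magmaDouble_const_ptr const *)dAarray, ldda,
                             dBarray, lddb, batchCount, queue);
    magma_queue_sync(queue);

    magma_free(dAarray);
    magma_free(dBarray);
}

As the loop over max_batchCount in the routine shows, magmablas_dgeadd_batched already chunks the batch internally against the queue's maxBatch limit, so no extra splitting is needed on the caller's side.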
7945b5627783c52bd2a7b69447c53c5ac2217ee4.hip
// !!! This is a file automatically generated by hipify!!! /* * EDDL Library - European Distributed Deep Learning Library. * Version: 1.1 * copyright (c) 2022, Universitat Politcnica de Valncia (UPV), PRHLT Research Centre * Date: March 2022 * Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected]) * All rights reserved */ #include <string.h> #include <cstdio> #include <cstdlib> #include <iostream> #include <hip/hip_runtime.h> __global__ void gpu_isfinite(float *A, float *B, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isfinite(A[thread_id_x]); } } __global__ void gpu_isinf(float *A, float *B, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isinf(A[thread_id_x]); } } __global__ void gpu_isnan(float *A, float *B, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isnan(A[thread_id_x]); } } __global__ void gpu_isneginf(float *A, float *B, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isinf(A[thread_id_x]) && A[thread_id_x] < 0.0f; } } __global__ void gpu_isposinf(float *A, float *B, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isinf(A[thread_id_x]) && A[thread_id_x] > 0.0f; } } __global__ void gpu_logical_and(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = (bool)A[thread_id_x] & (bool)B[thread_id_x]; } } __global__ void gpu_logical_or(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = (bool)A[thread_id_x] | (bool)B[thread_id_x]; } } __global__ void gpu_logical_not(float *A, float *B, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = !((bool)A[thread_id_x]); } } __global__ void gpu_logical_xor(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = (bool)A[thread_id_x] ^ (bool)B[thread_id_x]; } } __global__ void gpu_allclose(float *A, float *B, float rtol, float atol, bool equal_nan, long int size, bool &allclose){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; // if(!allclose) return; // Abort if there is a result if (thread_id_x < size && allclose){ bool close = fabsf(A[thread_id_x] - B[thread_id_x]) <= (atol + rtol * fabsf(B[thread_id_x])); if (!close){ allclose = false; // return; } } } __global__ void gpu_isclose(float *A, float *B, float *C, float rtol, float atol, bool equal_nan, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = fabsf(A[thread_id_x] - B[thread_id_x]) <= (atol + rtol * fabsf(B[thread_id_x])); } } __global__ void gpu_greater(float *A, float *B, float v, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] > v; } } __global__ void gpu_greater(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] > B[thread_id_x]; } } __global__ void gpu_greater_equal(float *A, 
float *B, float v, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] >= v; } } __global__ void gpu_greater_equal(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] >= B[thread_id_x]; } } __global__ void gpu_less(float *A, float *B, float v, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] < v; } } __global__ void gpu_less(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] < B[thread_id_x]; } } __global__ void gpu_less_equal(float *A, float *B, float v, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] <= v; } } __global__ void gpu_less_equal(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] <= B[thread_id_x]; } } __global__ void gpu_equal(float *A, float *B, float v, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] == v; } } __global__ void gpu_equal(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] == B[thread_id_x]; } } __global__ void gpu_not_equal(float *A, float *B, float v, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] != v; } } __global__ void gpu_not_equal(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] != B[thread_id_x]; } }
7945b5627783c52bd2a7b69447c53c5ac2217ee4.cu
/* * EDDL Library - European Distributed Deep Learning Library. * Version: 1.1 * copyright (c) 2022, Universitat Politècnica de València (UPV), PRHLT Research Centre * Date: March 2022 * Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected]) * All rights reserved */ #include <string.h> #include <cstdio> #include <cstdlib> #include <iostream> #include <cuda.h> __global__ void gpu_isfinite(float *A, float *B, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isfinite(A[thread_id_x]); } } __global__ void gpu_isinf(float *A, float *B, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isinf(A[thread_id_x]); } } __global__ void gpu_isnan(float *A, float *B, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isnan(A[thread_id_x]); } } __global__ void gpu_isneginf(float *A, float *B, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isinf(A[thread_id_x]) && A[thread_id_x] < 0.0f; } } __global__ void gpu_isposinf(float *A, float *B, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isinf(A[thread_id_x]) && A[thread_id_x] > 0.0f; } } __global__ void gpu_logical_and(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = (bool)A[thread_id_x] & (bool)B[thread_id_x]; } } __global__ void gpu_logical_or(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = (bool)A[thread_id_x] | (bool)B[thread_id_x]; } } __global__ void gpu_logical_not(float *A, float *B, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = !((bool)A[thread_id_x]); } } __global__ void gpu_logical_xor(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = (bool)A[thread_id_x] ^ (bool)B[thread_id_x]; } } __global__ void gpu_allclose(float *A, float *B, float rtol, float atol, bool equal_nan, long int size, bool &allclose){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; // if(!allclose) return; // Abort if there is a result if (thread_id_x < size && allclose){ bool close = fabsf(A[thread_id_x] - B[thread_id_x]) <= (atol + rtol * fabsf(B[thread_id_x])); if (!close){ allclose = false; // return; } } } __global__ void gpu_isclose(float *A, float *B, float *C, float rtol, float atol, bool equal_nan, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = fabsf(A[thread_id_x] - B[thread_id_x]) <= (atol + rtol * fabsf(B[thread_id_x])); } } __global__ void gpu_greater(float *A, float *B, float v, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] > v; } } __global__ void gpu_greater(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] > B[thread_id_x]; } } __global__ void gpu_greater_equal(float *A, float *B, float v, long int size){ long int thread_id_x = blockIdx.x * 
blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] >= v; } } __global__ void gpu_greater_equal(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] >= B[thread_id_x]; } } __global__ void gpu_less(float *A, float *B, float v, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] < v; } } __global__ void gpu_less(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] < B[thread_id_x]; } } __global__ void gpu_less_equal(float *A, float *B, float v, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] <= v; } } __global__ void gpu_less_equal(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] <= B[thread_id_x]; } } __global__ void gpu_equal(float *A, float *B, float v, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] == v; } } __global__ void gpu_equal(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] == B[thread_id_x]; } } __global__ void gpu_not_equal(float *A, float *B, float v, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] != v; } } __global__ void gpu_not_equal(float *A, float *B, float *C, long int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] != B[thread_id_x]; } }
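
For reference, a minimal host-side driver for the element-wise gpu_isclose kernel above could look like the sketch below. The kernel itself is assumed to be available to the linker (here it is just re-declared); the problem size, tolerances, and 256-thread block size are illustrative choices, not taken from EDDL.

#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

// Declaration matching the kernel defined in the file above.
__global__ void gpu_isclose(float *A, float *B, float *C, float rtol, float atol, bool equal_nan, long int size);

int main() {
    const long int n = 1 << 20;
    std::vector<float> a(n, 1.0f), b(n, 1.0f + 1e-6f), c(n);

    float *dA, *dB, *dC;
    cudaMalloc(&dA, n * sizeof(float));
    cudaMalloc(&dB, n * sizeof(float));
    cudaMalloc(&dC, n * sizeof(float));
    cudaMemcpy(dA, a.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, b.data(), n * sizeof(float), cudaMemcpyHostToDevice);

    const int threads = 256;                                  // illustrative block size
    const int blocks = (int)((n + threads - 1) / threads);
    gpu_isclose<<<blocks, threads>>>(dA, dB, dC, 1e-5f, 1e-8f, false, n);
    cudaMemcpy(c.data(), dC, n * sizeof(float), cudaMemcpyDeviceToHost);

    printf("c[0] = %f\n", c[0]);                              // expect 1.0: |a-b| is within tolerance
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}
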
62aa1308e1510bab285acc6ebe571c907a781854.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "loop.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( loop), dim3(gridBlock),dim3(threadBlock), 0, 0, ); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( loop), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( loop), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
62aa1308e1510bab285acc6ebe571c907a781854.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "loop.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); loop<<<gridBlock,threadBlock>>>(); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { loop<<<gridBlock,threadBlock>>>(); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { loop<<<gridBlock,threadBlock>>>(); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
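
One detail worth flagging in the harness above: the timed loop enqueues 1000 kernel launches and reads the end timestamp without synchronizing, so the reported microseconds largely reflect launch/enqueue overhead rather than kernel execution time. A self-contained variant that synchronizes before stopping the clock is sketched below; the empty loop kernel stands in for the one defined in loop.cu (not shown), and the launch configuration is illustrative.

#include <cuda_runtime.h>
#include <chrono>
#include <iostream>
using namespace std::chrono;

// Stand-in for the kernel from loop.cu; its real body is not shown above.
__global__ void loop() {}

int main() {
    dim3 gridBlock(32, 32), threadBlock(16, 16);   // illustrative configuration
    loop<<<gridBlock, threadBlock>>>();            // warm-up launch
    cudaDeviceSynchronize();

    auto start = steady_clock::now();
    for (int i = 0; i < 1000; ++i) {
        loop<<<gridBlock, threadBlock>>>();
    }
    cudaDeviceSynchronize();                       // make sure execution is included in the timing
    auto end = steady_clock::now();

    auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
    std::cout << usecs.count() << " us for 1000 launches" << std::endl;
    return 0;
}
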
748323d9e8855c08a2d8e745e87a34918c4ab9dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THHUNN/generic/ClassNLLCriterion.hip" #else void THNN_(ClassNLLCriterion_updateOutput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *output, int64_t reduction, THCTensor *weights, THCTensor *total_weight, int64_t ignore_index) { if (THCIndexTensor_(nDimensionLegacyNoScalars)(state, target) > 1) { THError("multi-target not supported"); } int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input); int n_classes = THCTensor_(sizeLegacyNoScalars)(state, input, n_dims - 1); if (weights) { THCUNN_assertSameGPU( state, 5, input, target, weights, output, total_weight ); } else { THCUNN_assertSameGPU( state, 4, input, target, output, total_weight ); } if (n_dims != 1 && n_dims != 2) { THError("input tensor should be 1D or 2D"); } int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(sizeLegacyNoScalars)(state, input, 0); int64_t num_targets = THCudaLongTensor_sizeLegacyNoScalars(state, target, 0); THArgCheck(batch_size == num_targets, 2, "mismatch between the batch size of input (%ld) and that of target (%ld)", batch_size, num_targets); if (weights && THCTensor_(nElement)(state, weights) != n_classes) { THCDescBuff s1 = THCTensor_(sizeDesc)(state, weights); THError("weight tensor should be defined either for all %d classes or no classes" " but got weight tensor of shape: %s", n_classes, s1.str); } if (reduction == at::Reduction::None && n_dims == 2) { THCTensor_(resize1d)(state, output, batch_size); if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. return; } if (weights) { weights = THCTensor_(newContiguous)(state, weights); } hipLaunchKernelGGL(( ClassNLLCriterion_updateOutput_no_reduce_kernel<scalar_t>) , dim3(GET_BLOCKS(batch_size)), dim3(CUDA_NUM_THREADS), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), batch_size, toDeviceTensor<scalar_t, 2>(state, input), toDeviceTensor<THCIndex_t, 1>(state, target), toDeviceTensor<scalar_t, 1>(state, output), weights ? THCTensor_(data)(state, weights) : NULL, n_classes, ignore_index); THCudaCheck(hipGetLastError()); if (weights) { THCTensor_(free)(state, weights); } return; } THCTensor_(resize0d)(state, output); THCTensor_(resize0d)(state, total_weight); input = THCTensor_(newContiguous)(state, input); weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL; target = THCIndexTensor_(newContiguous)(state, target); scalar_t *input_data = THCTensor_(data)(state, input); scalar_t *weights_data = weights ? 
THCTensor_(data)(state, weights) : NULL; THCIndex_t *target_data = THCIndexTensor_(data)(state, target); scalar_t *output_data = THCTensor_(data)(state, output); scalar_t *total_weight_data = THCTensor_(data)(state, total_weight); if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) { hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateOutput_kernel1<scalar_t>) , dim3(1), dim3(1), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output_data, total_weight_data, input_data, target_data, weights_data, reduction == at::Reduction::Mean, n_classes, ignore_index ); } else if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 2) { hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateOutput_kernel<scalar_t, accreal>) , dim3(1), dim3(NTHREADS), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output_data, total_weight_data, input_data, target_data, weights_data, reduction == at::Reduction::Mean, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), n_classes, ignore_index ); } THCudaCheck(hipGetLastError()); if (weights) { THCTensor_(free)(state, weights); } THCIndexTensor_(free)(state, target); THCTensor_(free)(state, input); } void THNN_(ClassNLLCriterion_updateGradInput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *gradOutput, THCTensor *gradInput, int64_t reduction, THCTensor *weights, THCTensor *total_weight, int64_t ignore_index) { if (THCIndexTensor_(nDimensionLegacyNoScalars)(state, target) > 1) { THError("multi-target not supported"); } int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input); int n_classes = THCTensor_(size)(state, input, n_dims - 1); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4, "gradInput must be contiguous"); if (weights) { THCUNN_assertSameGPU( state, 5, weights, input, target, gradInput, total_weight ); } else { THCUNN_assertSameGPU( state, 4, input, target, gradInput, total_weight ); } if (n_dims != 1 && n_dims != 2) { THError("input tensor should be 1D or 2D"); } int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0); int64_t num_targets = THCudaLongTensor_sizeLegacyNoScalars(state, target, 0); THArgCheck(batch_size == num_targets, 2, "mismatch between the batch size of input (%ld) and that of target (%ld)", batch_size, num_targets); if (weights && THCTensor_(nElement)(state, weights) != n_classes) { THError("weight tensor should be defined either for all or no classes"); } if (reduction == at::Reduction::None && n_dims == 2) { THCUNN_check_dim_size(state, gradOutput, 1, 0, batch_size); if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. return; } if (weights) { weights = THCTensor_(newContiguous)(state, weights); } hipLaunchKernelGGL(( ClassNLLCriterion_updateGradInput_no_reduce_kernel<scalar_t>) , dim3(GET_BLOCKS(batch_size)), dim3(CUDA_NUM_THREADS), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), batch_size, toDeviceTensor<THCIndex_t, 1>(state, target), toDeviceTensor<scalar_t, 1>(state, gradOutput), toDeviceTensor<scalar_t, 2>(state, gradInput), weights ? THCTensor_(data)(state, weights) : NULL, n_classes, ignore_index); THCudaCheck(hipGetLastError()); if (weights) { THCTensor_(free)(state, weights); } return; } weights = weights ? 
THCTensor_(newContiguous)(state, weights) : NULL; target = THCIndexTensor_(newContiguous)(state, target); THCUNN_check_dim_size(state, gradOutput, 1, 0, 1); scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput); scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL; scalar_t *gradInput_data = THCTensor_(data)(state, gradInput); THCIndex_t *target_data = THCIndexTensor_(data)(state, target); scalar_t *total_weight_data = THCTensor_(data)(state, total_weight); if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) { hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateGradInput_kernel1<scalar_t>) , dim3(1), dim3(1), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, weights_data, target_data, total_weight_data, reduction == at::Reduction::Mean, n_classes, ignore_index ); } else { hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateGradInput_kernel<scalar_t>) , dim3(1), dim3(NTHREADS), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, target_data, weights_data, total_weight_data, reduction == at::Reduction::Mean, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), n_classes, ignore_index ); } THCudaCheck(hipGetLastError()); if (weights) { THCTensor_(free)(state, weights); } THCIndexTensor_(free)(state, target); } #endif
748323d9e8855c08a2d8e745e87a34918c4ab9dd.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THCUNN/generic/ClassNLLCriterion.cu" #else void THNN_(ClassNLLCriterion_updateOutput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *output, int64_t reduction, THCTensor *weights, THCTensor *total_weight, int64_t ignore_index) { if (THCIndexTensor_(nDimensionLegacyNoScalars)(state, target) > 1) { THError("multi-target not supported"); } int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input); int n_classes = THCTensor_(sizeLegacyNoScalars)(state, input, n_dims - 1); if (weights) { THCUNN_assertSameGPU( state, 5, input, target, weights, output, total_weight ); } else { THCUNN_assertSameGPU( state, 4, input, target, output, total_weight ); } if (n_dims != 1 && n_dims != 2) { THError("input tensor should be 1D or 2D"); } int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(sizeLegacyNoScalars)(state, input, 0); int64_t num_targets = THCudaLongTensor_sizeLegacyNoScalars(state, target, 0); THArgCheck(batch_size == num_targets, 2, "mismatch between the batch size of input (%ld) and that of target (%ld)", batch_size, num_targets); if (weights && THCTensor_(nElement)(state, weights) != n_classes) { THCDescBuff s1 = THCTensor_(sizeDesc)(state, weights); THError("weight tensor should be defined either for all %d classes or no classes" " but got weight tensor of shape: %s", n_classes, s1.str); } if (reduction == at::Reduction::None && n_dims == 2) { THCTensor_(resize1d)(state, output, batch_size); if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. return; } if (weights) { weights = THCTensor_(newContiguous)(state, weights); } ClassNLLCriterion_updateOutput_no_reduce_kernel<scalar_t> <<<GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, c10::cuda::getCurrentCUDAStream()>>>( batch_size, toDeviceTensor<scalar_t, 2>(state, input), toDeviceTensor<THCIndex_t, 1>(state, target), toDeviceTensor<scalar_t, 1>(state, output), weights ? THCTensor_(data)(state, weights) : NULL, n_classes, ignore_index); THCudaCheck(cudaGetLastError()); if (weights) { THCTensor_(free)(state, weights); } return; } THCTensor_(resize0d)(state, output); THCTensor_(resize0d)(state, total_weight); input = THCTensor_(newContiguous)(state, input); weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL; target = THCIndexTensor_(newContiguous)(state, target); scalar_t *input_data = THCTensor_(data)(state, input); scalar_t *weights_data = weights ? 
THCTensor_(data)(state, weights) : NULL; THCIndex_t *target_data = THCIndexTensor_(data)(state, target); scalar_t *output_data = THCTensor_(data)(state, output); scalar_t *total_weight_data = THCTensor_(data)(state, total_weight); if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) { cunn_ClassNLLCriterion_updateOutput_kernel1<scalar_t> <<<1, 1, 0, c10::cuda::getCurrentCUDAStream()>>>( output_data, total_weight_data, input_data, target_data, weights_data, reduction == at::Reduction::Mean, n_classes, ignore_index ); } else if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 2) { cunn_ClassNLLCriterion_updateOutput_kernel<scalar_t, accreal> <<<1, NTHREADS, 0, c10::cuda::getCurrentCUDAStream()>>>( output_data, total_weight_data, input_data, target_data, weights_data, reduction == at::Reduction::Mean, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), n_classes, ignore_index ); } THCudaCheck(cudaGetLastError()); if (weights) { THCTensor_(free)(state, weights); } THCIndexTensor_(free)(state, target); THCTensor_(free)(state, input); } void THNN_(ClassNLLCriterion_updateGradInput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *gradOutput, THCTensor *gradInput, int64_t reduction, THCTensor *weights, THCTensor *total_weight, int64_t ignore_index) { if (THCIndexTensor_(nDimensionLegacyNoScalars)(state, target) > 1) { THError("multi-target not supported"); } int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input); int n_classes = THCTensor_(size)(state, input, n_dims - 1); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4, "gradInput must be contiguous"); if (weights) { THCUNN_assertSameGPU( state, 5, weights, input, target, gradInput, total_weight ); } else { THCUNN_assertSameGPU( state, 4, input, target, gradInput, total_weight ); } if (n_dims != 1 && n_dims != 2) { THError("input tensor should be 1D or 2D"); } int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0); int64_t num_targets = THCudaLongTensor_sizeLegacyNoScalars(state, target, 0); THArgCheck(batch_size == num_targets, 2, "mismatch between the batch size of input (%ld) and that of target (%ld)", batch_size, num_targets); if (weights && THCTensor_(nElement)(state, weights) != n_classes) { THError("weight tensor should be defined either for all or no classes"); } if (reduction == at::Reduction::None && n_dims == 2) { THCUNN_check_dim_size(state, gradOutput, 1, 0, batch_size); if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. return; } if (weights) { weights = THCTensor_(newContiguous)(state, weights); } ClassNLLCriterion_updateGradInput_no_reduce_kernel<scalar_t> <<<GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, c10::cuda::getCurrentCUDAStream()>>>( batch_size, toDeviceTensor<THCIndex_t, 1>(state, target), toDeviceTensor<scalar_t, 1>(state, gradOutput), toDeviceTensor<scalar_t, 2>(state, gradInput), weights ? THCTensor_(data)(state, weights) : NULL, n_classes, ignore_index); THCudaCheck(cudaGetLastError()); if (weights) { THCTensor_(free)(state, weights); } return; } weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL; target = THCIndexTensor_(newContiguous)(state, target); THCUNN_check_dim_size(state, gradOutput, 1, 0, 1); scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput); scalar_t *weights_data = weights ? 
THCTensor_(data)(state, weights) : NULL; scalar_t *gradInput_data = THCTensor_(data)(state, gradInput); THCIndex_t *target_data = THCIndexTensor_(data)(state, target); scalar_t *total_weight_data = THCTensor_(data)(state, total_weight); if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) { cunn_ClassNLLCriterion_updateGradInput_kernel1<scalar_t> <<<1, 1, 0, c10::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, weights_data, target_data, total_weight_data, reduction == at::Reduction::Mean, n_classes, ignore_index ); } else { cunn_ClassNLLCriterion_updateGradInput_kernel<scalar_t> <<<1, NTHREADS, 0, c10::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, target_data, weights_data, total_weight_data, reduction == at::Reduction::Mean, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), n_classes, ignore_index ); } THCudaCheck(cudaGetLastError()); if (weights) { THCTensor_(free)(state, weights); } THCIndexTensor_(free)(state, target); } #endif
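
The no-reduction launches above size their grids with GET_BLOCKS(batch_size) and CUDA_NUM_THREADS, i.e. one thread per sample. Those helpers are defined elsewhere in the THCUNN headers, not in this file; the sketch below only illustrates the usual shape of such helpers (the 1024-thread value and the _SKETCH names are assumptions for illustration).

#include <cstdint>

// Illustrative grid-sizing helpers; the real macros live in the framework headers.
constexpr int CUDA_NUM_THREADS_SKETCH = 1024;

inline int GET_BLOCKS_SKETCH(int64_t n) {
    // Enough blocks to give every element its own thread, rounded up.
    return (int)((n + CUDA_NUM_THREADS_SKETCH - 1) / CUDA_NUM_THREADS_SKETCH);
}
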
d06fb0b0d568ae18d64520bcb2acf921cd35d6fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void cu_mat_scalar_multiply(double *A, double B, const int n){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ A[tid] = __fmul_rd(A[tid], B); tid += stride; } }
d06fb0b0d568ae18d64520bcb2acf921cd35d6fe.cu
#include "includes.h" __global__ void cu_mat_scalar_multiply(double *A, double B, const int n){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ A[tid] = __fmul_rd(A[tid], B); tid += stride; } }
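
A side note on the pair above: __fmul_rd is the single-precision multiply-with-round-toward-negative-infinity intrinsic, so feeding it double operands implicitly narrows each element to float before the multiply. If full double precision with the same rounding mode is intended, a variant like the sketch below (illustrative, not part of the original file) uses the double-precision intrinsic instead.

#include <cuda_runtime.h>

// Sketch: same grid-stride loop, but the multiply stays in double precision.
__global__ void cu_mat_scalar_multiply_f64(double *A, double B, const int n) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    while (tid < n) {
        A[tid] = __dmul_rd(A[tid], B);   // double multiply, rounds toward -infinity
        tid += stride;
    }
}
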
42a8e3237446d05f1b571273dabd0eab5df8dcb0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * a simple test */ __shared__ float data1[32][32]; __shared__ float data2[32][32]; __shared__ float data3[32][32]; __device__ void mult(__shared__ float d1[32][32], __shared__ float d2[32][32], __shared__ float d3[32][32], int idx) { int i; int j, k, l; j = -1; k = 0; l = 1; for (i = 0; i < 31; i++) { d1[idx][i] = d2[idx][j+1] + d2[idx][j] + d2[idx][j-1] + d2[idx][k+1] + d2[idx][k] + d2[idx][k-1] + d2[idx][l+1] + d2[idx][l] + d2[idx][l-1]; j++; k++; l++; } } __global__ void doit(int start, int end) { int i; for (i = start; i < end; i++) { mult(data1, data2, data3, i); } }
42a8e3237446d05f1b571273dabd0eab5df8dcb0.cu
/* * a simple test */ __shared__ float data1[32][32]; __shared__ float data2[32][32]; __shared__ float data3[32][32]; __device__ void mult(__shared__ float d1[32][32], __shared__ float d2[32][32], __shared__ float d3[32][32], int idx) { int i; int j, k, l; j = -1; k = 0; l = 1; for (i = 0; i < 31; i++) { d1[idx][i] = d2[idx][j+1] + d2[idx][j] + d2[idx][j-1] + d2[idx][k+1] + d2[idx][k] + d2[idx][k-1] + d2[idx][l+1] + d2[idx][l] + d2[idx][l-1]; j++; k++; l++; } } __global__ void doit(int start, int end) { int i; for (i = start; i < end; i++) { mult(data1, data2, data3, i); } }
bc3b2eb4c7bc4f5d4c7e14ed425385a81ffb9b5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" # include <stdio.h> __device__ float doTheCalculation(float f) { return f * f * f; } __global__ void cube(float *d_in, float *d_out) { int idx = threadIdx.x + blockIdx.x * blockDim.x; float f = d_in[idx]; //d_out[idx] = f * f *f; d_out[idx] = doTheCalculation(f); } int main() { const int ARRAY_SIZE = 4000; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); float h_in[ARRAY_SIZE]; for(int i=0; i < ARRAY_SIZE; i++) { h_in[i] = float(i); } float h_out[ARRAY_SIZE]; // declare memory pointers float * d_in; float * d_out; // allocating memory for GPU variables hipMalloc((void **) &d_in, ARRAY_BYTES); hipMalloc((void **) &d_out, ARRAY_BYTES); hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); hipLaunchKernelGGL(( cube), dim3(4), dim3(1000), 0, 0, d_in, d_out); hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost); for(int i=0; i < ARRAY_SIZE; i++) { printf("%f", h_out[i]); printf(((i%4) != 3) ? "\t" : "\n"); } hipFree(d_in); hipFree(d_out); return 0; }
bc3b2eb4c7bc4f5d4c7e14ed425385a81ffb9b5c.cu
# include <stdio.h> __device__ float doTheCalculation(float f) { return f * f * f; } __global__ void cube(float *d_in, float *d_out) { int idx = threadIdx.x + blockIdx.x * blockDim.x; float f = d_in[idx]; //d_out[idx] = f * f *f; d_out[idx] = doTheCalculation(f); } int main() { const int ARRAY_SIZE = 4000; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); float h_in[ARRAY_SIZE]; for(int i=0; i < ARRAY_SIZE; i++) { h_in[i] = float(i); } float h_out[ARRAY_SIZE]; // declare memory pointers float * d_in; float * d_out; // allocating memory for GPU variables cudaMalloc((void **) &d_in, ARRAY_BYTES); cudaMalloc((void **) &d_out, ARRAY_BYTES); cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); cube<<<4, 1000>>>(d_in, d_out); cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost); for(int i=0; i < ARRAY_SIZE; i++) { printf("%f", h_out[i]); printf(((i%4) != 3) ? "\t" : "\n"); } cudaFree(d_in); cudaFree(d_out); return 0; }
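
The cube example above launches its kernel and copies results back without checking for errors; examples like this are often wrapped with a small launch-check helper. The sketch below shows one common form (CHECK is a local helper name introduced here, not something from the file above).

#include <cstdio>
#include <cuda_runtime.h>

// Minimal error-checking helper around CUDA runtime calls and kernel launches.
#define CHECK(call)                                                     \
    do {                                                                \
        cudaError_t err = (call);                                       \
        if (err != cudaSuccess) {                                       \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,          \
                    cudaGetErrorString(err));                           \
            return 1;                                                   \
        }                                                               \
    } while (0)

__global__ void noop() {}

int main() {
    noop<<<1, 1>>>();
    CHECK(cudaGetLastError());        // catches invalid launch configurations
    CHECK(cudaDeviceSynchronize());   // surfaces errors raised during execution
    printf("launch ok\n");
    return 0;
}
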
0bdde8d6100f9566e50e10288f961fdec510e603.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "Network.h" #include <stdio.h> #include <hip/hip_runtime.h> int main() { ErrorHandler *error = &ErrorHandler::getInstance(); Network train; float learningrate = 0.05; int trainiter = 1; train.train(false,false, learningrate, trainiter); train.test(false); system("Pause"); }
0bdde8d6100f9566e50e10288f961fdec510e603.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "Network.h" #include <stdio.h> #include <cuda.h> int main() { ErrorHandler *error = &ErrorHandler::getInstance(); Network train; float learningrate = 0.05; int trainiter = 1; train.train(false,false, learningrate, trainiter); train.test(false); system("Pause"); }
88723c3a51e51e8e4d60ad46d61d53cffa119fba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ //////////////////////////////////////////////////////////////////////////////// // // NVIDIA CUDA implementation of Viola-Jones Object Detection Framework // // The algorithm and code are explained in the upcoming GPU Computing Gems // chapter in detail: // // Anton Obukhov, "Haar Classifiers for Object Detection with CUDA" // PDF URL placeholder // email: [email protected], [email protected] // // Credits for help with the code to: // Alexey Mendelenko, Cyril Crassin, and Mikhail Smirnov. 
// //////////////////////////////////////////////////////////////////////////////// #if !defined CUDA_DISABLER #include <algorithm> #include <cstdio> #include "NCV.hpp" #include "NCVAlg.hpp" #include "NPP_staging/NPP_staging.hpp" #include "NCVRuntimeTemplates.hpp" #include "NCVHaarObjectDetection.hpp" #include "opencv2/core/cuda/warp.hpp" #include "opencv2/core/cuda/warp_shuffle.hpp" //============================================================================== // // BlockScan file // //============================================================================== NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive //Almost the same as naive scan1Inclusive, but doesn't need __syncthreads() //assuming size <= WARP_SIZE and size is power of 2 __device__ Ncv32u warpScanInclusive(Ncv32u idata, volatile Ncv32u *s_Data) { #if __CUDA_ARCH__ >= 300 const unsigned int laneId = cv::gpu::cudev::Warp::laneId(); // scan on shuffl functions #pragma unroll for (int i = 1; i <= (K_WARP_SIZE / 2); i *= 2) { const Ncv32u n = cv::gpu::cudev::shfl_up(idata, i); if (laneId >= i) idata += n; } return idata; #else Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1)); s_Data[pos] = 0; pos += K_WARP_SIZE; s_Data[pos] = idata; s_Data[pos] += s_Data[pos - 1]; s_Data[pos] += s_Data[pos - 2]; s_Data[pos] += s_Data[pos - 4]; s_Data[pos] += s_Data[pos - 8]; s_Data[pos] += s_Data[pos - 16]; return s_Data[pos]; #endif } __device__ __forceinline__ Ncv32u warpScanExclusive(Ncv32u idata, volatile Ncv32u *s_Data) { return warpScanInclusive(idata, s_Data) - idata; } template <Ncv32u tiNumScanThreads> __device__ Ncv32u scan1Inclusive(Ncv32u idata, volatile Ncv32u *s_Data) { if (tiNumScanThreads > K_WARP_SIZE) { //Bottom-level inclusive warp scan Ncv32u warpResult = warpScanInclusive(idata, s_Data); //Save top elements of each warp for exclusive warp scan //sync to wait for warp scans to complete (because s_Data is being overwritten) __syncthreads(); if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) ) { s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult; } //wait for warp scans to complete __syncthreads(); if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) ) { //grab top warp elements Ncv32u val = s_Data[threadIdx.x]; //calculate exclusive scan and write back to shared memory s_Data[threadIdx.x] = warpScanExclusive(val, s_Data); } //return updated warp scans with exclusive scan results __syncthreads(); return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE]; } else { return warpScanInclusive(idata, s_Data); } } //============================================================================== // // HaarClassifierCascade file // //============================================================================== const Ncv32u MAX_GRID_DIM = 65535; const Ncv32u NUM_THREADS_ANCHORSPARALLEL = 64; #define NUM_THREADS_CLASSIFIERPARALLEL_LOG2 6 #define NUM_THREADS_CLASSIFIERPARALLEL (1 << NUM_THREADS_CLASSIFIERPARALLEL_LOG2) /** \internal * Haar features solid array. */ texture<uint2, 1, hipReadModeElementType> texHaarFeatures; /** \internal * Haar classifiers flattened trees container. * Two parts: first contains root nodes, second - nodes that are referred by root nodes. 
* Drawback: breaks tree locality (might cause more cache misses * Advantage: No need to introduce additional 32-bit field to index root nodes offsets */ texture<uint4, 1, hipReadModeElementType> texHaarClassifierNodes; texture<Ncv32u, 1, hipReadModeElementType> texIImage; __device__ HaarStage64 getStage(Ncv32u iStage, HaarStage64 *d_Stages) { return d_Stages[iStage]; } template <NcvBool tbCacheTextureCascade> __device__ HaarClassifierNode128 getClassifierNode(Ncv32u iNode, HaarClassifierNode128 *d_ClassifierNodes) { HaarClassifierNode128 tmpNode; if (tbCacheTextureCascade) { tmpNode._ui4 = tex1Dfetch(texHaarClassifierNodes, iNode); } else { tmpNode = d_ClassifierNodes[iNode]; } return tmpNode; } template <NcvBool tbCacheTextureCascade> __device__ void getFeature(Ncv32u iFeature, HaarFeature64 *d_Features, Ncv32f *weight, Ncv32u *rectX, Ncv32u *rectY, Ncv32u *rectWidth, Ncv32u *rectHeight) { HaarFeature64 feature; if (tbCacheTextureCascade) { feature._ui2 = tex1Dfetch(texHaarFeatures, iFeature); } else { feature = d_Features[iFeature]; } feature.getRect(rectX, rectY, rectWidth, rectHeight); *weight = feature.getWeight(); } template <NcvBool tbCacheTextureIImg> __device__ Ncv32u getElemIImg(Ncv32u x, Ncv32u *d_IImg) { if (tbCacheTextureIImg) { return tex1Dfetch(texIImage, x); } else { return d_IImg[x]; } } __device__ Ncv32u d_outMaskPosition; __device__ void compactBlockWriteOutAnchorParallel(Ncv32u threadPassFlag, Ncv32u threadElem, Ncv32u *vectorOut) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 __shared__ Ncv32u shmem[NUM_THREADS_ANCHORSPARALLEL * 2]; __shared__ Ncv32u numPassed; __shared__ Ncv32u outMaskOffset; Ncv32u incScan = scan1Inclusive<NUM_THREADS_ANCHORSPARALLEL>(threadPassFlag, shmem); __syncthreads(); if (threadIdx.x == NUM_THREADS_ANCHORSPARALLEL-1) { numPassed = incScan; outMaskOffset = atomicAdd(&d_outMaskPosition, incScan); } if (threadPassFlag) { Ncv32u excScan = incScan - threadPassFlag; shmem[excScan] = threadElem; } __syncthreads(); if (threadIdx.x < numPassed) { vectorOut[outMaskOffset + threadIdx.x] = shmem[threadIdx.x]; } #endif } template <NcvBool tbInitMaskPositively, NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbReadPixelIndexFromVector, NcvBool tbDoAtomicCompaction> __global__ void applyHaarClassifierAnchorParallel(Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { Ncv32u y_offs; Ncv32u x_offs; Ncv32u maskOffset; Ncv32u outMaskVal; NcvBool bInactiveThread = false; if (tbReadPixelIndexFromVector) { maskOffset = (MAX_GRID_DIM * blockIdx.y + blockIdx.x) * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; if (maskOffset >= mask1Dlen) { if (tbDoAtomicCompaction) bInactiveThread = true; else return; } if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread) { outMaskVal = d_inMask[maskOffset]; y_offs = outMaskVal >> 16; x_offs = outMaskVal & 0xFFFF; } } else { y_offs = blockIdx.y; x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; if (x_offs >= mask2Dstride) { if (tbDoAtomicCompaction) bInactiveThread = true; else return; } if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread) { maskOffset = y_offs * mask2Dstride + x_offs; if ((x_offs >= anchorsRoi.width) || (!tbInitMaskPositively && d_inMask != d_outMask && d_inMask[maskOffset] == 
OBJDET_MASK_ELEMENT_INVALID_32U)) { if (tbDoAtomicCompaction) { bInactiveThread = true; } else { d_outMask[maskOffset] = OBJDET_MASK_ELEMENT_INVALID_32U; return; } } outMaskVal = (y_offs << 16) | x_offs; } } NcvBool bPass = true; if (!tbDoAtomicCompaction || tbDoAtomicCompaction) { Ncv32f pixelStdDev = 0.0f; if (!bInactiveThread) pixelStdDev = d_weights[y_offs * weightsStride + x_offs]; for (Ncv32u iStage = startStageInc; iStage < endStageExc; iStage++) { Ncv32f curStageSum = 0.0f; HaarStage64 curStage = getStage(iStage, d_Stages); Ncv32u numRootNodesInStage = curStage.getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset(); Ncv32f stageThreshold = curStage.getStageThreshold(); while (numRootNodesInStage--) { NcvBool bMoreNodesToTraverse = true; Ncv32u iNode = curRootNodeOffset; if (bPass && !bInactiveThread) { while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes); HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures(); Ncv32u iFeature = featuresDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.0f; for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { Ncv32f rectWeight; Ncv32u rectX, rectY, rectWidth, rectHeight; getFeature<tbCacheTextureCascade> (iFeature + iRect, d_Features, &rectWeight, &rectX, &rectY, &rectWidth, &rectHeight); Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride; Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) + getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg); #if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight); #else curNodeVal += (Ncv32f)rectSum * rectWeight; #endif } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = featuresDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { iNode = nextNodeDescriptor.getNextNodeOffset(); } } } __syncthreads(); curRootNodeOffset++; } if (curStageSum < stageThreshold) { bPass = false; outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U; } } } __syncthreads(); if (!tbDoAtomicCompaction) { if (!tbReadPixelIndexFromVector || (tbReadPixelIndexFromVector && (!bPass || d_inMask != d_outMask))) { d_outMask[maskOffset] = outMaskVal; } } else { compactBlockWriteOutAnchorParallel(bPass && !bInactiveThread, outMaskVal, d_outMask); } } template <NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbDoAtomicCompaction> __global__ void applyHaarClassifierClassifierParallel(Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u 
mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { Ncv32u maskOffset = MAX_GRID_DIM * blockIdx.y + blockIdx.x; if (maskOffset >= mask1Dlen) { return; } Ncv32u outMaskVal = d_inMask[maskOffset]; Ncv32u y_offs = outMaskVal >> 16; Ncv32u x_offs = outMaskVal & 0xFFFF; Ncv32f pixelStdDev = d_weights[y_offs * weightsStride + x_offs]; NcvBool bPass = true; for (Ncv32u iStage = startStageInc; iStage<endStageExc; iStage++) { //this variable is subject to reduction Ncv32f curStageSum = 0.0f; HaarStage64 curStage = getStage(iStage, d_Stages); Ncv32s numRootNodesInStage = curStage.getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset() + threadIdx.x; Ncv32f stageThreshold = curStage.getStageThreshold(); Ncv32u numRootChunks = (numRootNodesInStage + NUM_THREADS_CLASSIFIERPARALLEL - 1) >> NUM_THREADS_CLASSIFIERPARALLEL_LOG2; for (Ncv32u chunkId=0; chunkId<numRootChunks; chunkId++) { NcvBool bMoreNodesToTraverse = true; if (chunkId * NUM_THREADS_CLASSIFIERPARALLEL + threadIdx.x < numRootNodesInStage) { Ncv32u iNode = curRootNodeOffset; while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes); HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures(); Ncv32u iFeature = featuresDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.0f; //TODO: fetch into shmem if size suffices. Shmem can be shared with reduce for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { Ncv32f rectWeight; Ncv32u rectX, rectY, rectWidth, rectHeight; getFeature<tbCacheTextureCascade> (iFeature + iRect, d_Features, &rectWeight, &rectX, &rectY, &rectWidth, &rectHeight); Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride; Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) + getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg); #if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight); #else curNodeVal += (Ncv32f)rectSum * rectWeight; #endif } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = featuresDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { iNode = nextNodeDescriptor.getNextNodeOffset(); } } } __syncthreads(); curRootNodeOffset += NUM_THREADS_CLASSIFIERPARALLEL; } Ncv32f finalStageSum = subReduce<Ncv32f, functorAddValues<Ncv32f>, NUM_THREADS_CLASSIFIERPARALLEL>(curStageSum); if (finalStageSum < stageThreshold) { bPass = false; outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U; break; } } if (!tbDoAtomicCompaction) { if (!bPass || d_inMask != d_outMask) { if (!threadIdx.x) { d_outMask[maskOffset] = outMaskVal; } } } else { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 if 
(bPass && !threadIdx.x) { Ncv32u outMaskOffset = atomicAdd(&d_outMaskPosition, 1); d_outMask[outMaskOffset] = outMaskVal; } #endif } } template <NcvBool tbMaskByInmask, NcvBool tbDoAtomicCompaction> __global__ void initializeMaskVector(Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u step) { Ncv32u y_offs = blockIdx.y; Ncv32u x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; Ncv32u outMaskOffset = y_offs * gridDim.x * blockDim.x + x_offs; Ncv32u y_offs_upsc = step * y_offs; Ncv32u x_offs_upsc = step * x_offs; Ncv32u inMaskOffset = y_offs_upsc * mask2Dstride + x_offs_upsc; Ncv32u outElem = OBJDET_MASK_ELEMENT_INVALID_32U; if (x_offs_upsc < anchorsRoi.width && (!tbMaskByInmask || d_inMask[inMaskOffset] != OBJDET_MASK_ELEMENT_INVALID_32U)) { outElem = (y_offs_upsc << 16) | x_offs_upsc; } if (!tbDoAtomicCompaction) { d_outMask[outMaskOffset] = outElem; } else { compactBlockWriteOutAnchorParallel(outElem != OBJDET_MASK_ELEMENT_INVALID_32U, outElem, d_outMask); } } struct applyHaarClassifierAnchorParallelFunctor { dim3 gridConf, blockConf; hipStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_IImg; Ncv32u IImgStride; Ncv32f *d_weights; Ncv32u weightsStride; HaarFeature64 *d_Features; HaarClassifierNode128 *d_ClassifierNodes; HaarStage64 *d_Stages; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u startStageInc; Ncv32u endStageExc; Ncv32f scaleArea; //Arguments are passed through the constructor applyHaarClassifierAnchorParallelFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream, Ncv32u *_d_IImg, Ncv32u _IImgStride, Ncv32f *_d_weights, Ncv32u _weightsStride, HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _startStageInc, Ncv32u _endStageExc, Ncv32f _scaleArea) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_IImg(_d_IImg), IImgStride(_IImgStride), d_weights(_d_weights), weightsStride(_weightsStride), d_Features(_d_Features), d_ClassifierNodes(_d_ClassifierNodes), d_Stages(_d_Stages), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), startStageInc(_startStageInc), endStageExc(_endStageExc), scaleArea(_scaleArea) {} template<class TList> void call(TList tl) { (void)tl; hipLaunchKernelGGL(( applyHaarClassifierAnchorParallel < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value, Loki::TL::TypeAt<TList, 2>::Result::value, Loki::TL::TypeAt<TList, 3>::Result::value, Loki::TL::TypeAt<TList, 4>::Result::value >) , dim3(gridConf), dim3(blockConf), 0, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); } }; void applyHaarClassifierAnchorParallelDynTemplate(NcvBool tbInitMaskPositively, NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbReadPixelIndexFromVector, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, hipStream_t cuStream, Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u 
startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { applyHaarClassifierAnchorParallelFunctor functor(gridConf, blockConf, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 5, applyHaarClassifierAnchorParallelFunctor> ::call( &functor, tbInitMaskPositively, tbCacheTextureIImg, tbCacheTextureCascade, tbReadPixelIndexFromVector, tbDoAtomicCompaction); } struct applyHaarClassifierClassifierParallelFunctor { dim3 gridConf, blockConf; hipStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_IImg; Ncv32u IImgStride; Ncv32f *d_weights; Ncv32u weightsStride; HaarFeature64 *d_Features; HaarClassifierNode128 *d_ClassifierNodes; HaarStage64 *d_Stages; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u startStageInc; Ncv32u endStageExc; Ncv32f scaleArea; //Arguments are passed through the constructor applyHaarClassifierClassifierParallelFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream, Ncv32u *_d_IImg, Ncv32u _IImgStride, Ncv32f *_d_weights, Ncv32u _weightsStride, HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _startStageInc, Ncv32u _endStageExc, Ncv32f _scaleArea) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_IImg(_d_IImg), IImgStride(_IImgStride), d_weights(_d_weights), weightsStride(_weightsStride), d_Features(_d_Features), d_ClassifierNodes(_d_ClassifierNodes), d_Stages(_d_Stages), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), startStageInc(_startStageInc), endStageExc(_endStageExc), scaleArea(_scaleArea) {} template<class TList> void call(TList tl) { (void)tl; hipLaunchKernelGGL(( applyHaarClassifierClassifierParallel < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value, Loki::TL::TypeAt<TList, 2>::Result::value >) , dim3(gridConf), dim3(blockConf), 0, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); } }; void applyHaarClassifierClassifierParallelDynTemplate(NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, hipStream_t cuStream, Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { applyHaarClassifierClassifierParallelFunctor functor(gridConf, blockConf, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 3, applyHaarClassifierClassifierParallelFunctor> ::call( &functor, tbCacheTextureIImg, tbCacheTextureCascade, 
tbDoAtomicCompaction); } struct initializeMaskVectorFunctor { dim3 gridConf, blockConf; hipStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u step; //Arguments are passed through the constructor initializeMaskVectorFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _step) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), step(_step) {} template<class TList> void call(TList tl) { (void)tl; hipLaunchKernelGGL(( initializeMaskVector < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value >) , dim3(gridConf), dim3(blockConf), 0, cuStream, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, step); } }; void initializeMaskVectorDynTemplate(NcvBool tbMaskByInmask, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, hipStream_t cuStream, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u step) { initializeMaskVectorFunctor functor(gridConf, blockConf, cuStream, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, step); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 2, initializeMaskVectorFunctor> ::call( &functor, tbMaskByInmask, tbDoAtomicCompaction); } Ncv32u getStageNumWithNotLessThanNclassifiers(Ncv32u N, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages) { Ncv32u i = 0; for (; i<haar.NumStages; i++) { if (h_HaarStages.ptr()[i].getNumClassifierRootNodes() >= N) { break; } } return i; } NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &integral, NCVMatrix<Ncv32f> &d_weights, NCVMatrixAlloc<Ncv32u> &d_pixelMask, Ncv32u &numDetections, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarStage64> &d_HaarStages, NCVVector<HaarClassifierNode128> &d_HaarNodes, NCVVector<HaarFeature64> &d_HaarFeatures, NcvBool bMaskElements, NcvSize32u anchorsRoi, Ncv32u pixelStep, Ncv32f scaleArea, INCVMemAllocator &gpuAllocator, INCVMemAllocator &cpuAllocator, hipDeviceProp_t &devProp, hipStream_t cuStream) { ncvAssertReturn(integral.memType() == d_weights.memType()&& integral.memType() == d_pixelMask.memType() && integral.memType() == gpuAllocator.memType() && (integral.memType() == NCVMemoryTypeDevice || integral.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() && d_HaarStages.memType() == d_HaarFeatures.memType() && (d_HaarStages.memType() == NCVMemoryTypeDevice || d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED); ncvAssertReturn((integral.ptr() != NULL && d_weights.ptr() != NULL && d_pixelMask.ptr() != NULL && h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL && d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR); ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 && d_pixelMask.width() >= anchorsRoi.width && d_pixelMask.height() 
>= anchorsRoi.height && d_weights.width() >= anchorsRoi.width && d_weights.height() >= anchorsRoi.height && integral.width() >= anchorsRoi.width + haar.ClassifierSize.width && integral.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE); ncvAssertReturn(d_HaarStages.length() >= haar.NumStages && d_HaarNodes.length() >= haar.NumClassifierTotalNodes && d_HaarFeatures.length() >= haar.NumFeatures && d_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false || gpuAllocator.isCounting(), NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); NCV_SET_SKIP_COND(gpuAllocator.isCounting()); #if defined _SELF_TEST_ NCVStatus ncvStat; NCVMatrixAlloc<Ncv32u> h_integralImage(cpuAllocator, integral.width, integral.height, integral.pitch); ncvAssertReturn(h_integralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32f> h_weights(cpuAllocator, d_weights.width, d_weights.height, d_weights.pitch); ncvAssertReturn(h_weights.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> h_pixelMask(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch); ncvAssertReturn(h_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(cpuAllocator, d_HaarNodes.length); ncvAssertReturn(h_HaarNodes.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<HaarFeature64> h_HaarFeatures(cpuAllocator, d_HaarFeatures.length); ncvAssertReturn(h_HaarFeatures.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> h_pixelMask_d(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch); ncvAssertReturn(h_pixelMask_d.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCV_SKIP_COND_BEGIN ncvStat = d_pixelMask.copySolid(h_pixelMask, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = integral.copySolid(h_integralImage, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_weights.copySolid(h_weights, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_HaarNodes.copySolid(h_HaarNodes, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_HaarFeatures.copySolid(h_HaarFeatures, 0); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(hipStreamSynchronize(0), NCV_CUDA_ERROR); for (Ncv32u i=0; i<(Ncv32u)anchorsRoi.height; i++) { for (Ncv32u j=0; j<d_pixelMask.stride(); j++) { if ((i%pixelStep==0) && (j%pixelStep==0) && (j<(Ncv32u)anchorsRoi.width)) { if (!bMaskElements || h_pixelMask.ptr[i*d_pixelMask.stride()+j] != OBJDET_MASK_ELEMENT_INVALID_32U) { h_pixelMask.ptr[i*d_pixelMask.stride()+j] = (i << 16) | j; } } else { h_pixelMask.ptr[i*d_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U; } } } NCV_SKIP_COND_END #endif NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment(), anchorsRoi.height * d_pixelMask.stride()); ncvAssertReturn(d_vecPixelMask.isMemReused(), NCV_ALLOCATOR_BAD_REUSE); NCVVectorAlloc<Ncv32u> d_vecPixelMaskTmp(gpuAllocator, static_cast<Ncv32u>(d_vecPixelMask.length())); ncvAssertReturn(d_vecPixelMaskTmp.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<Ncv32u> hp_pool32u(cpuAllocator, 2); ncvAssertReturn(hp_pool32u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); Ncv32u *hp_zero = &hp_pool32u.ptr()[0]; Ncv32u *hp_numDet = &hp_pool32u.ptr()[1]; NCV_SKIP_COND_BEGIN *hp_zero = 0; *hp_numDet = 0; NCV_SKIP_COND_END Ncv32f scaleAreaPixels = scaleArea * 
((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) * (haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER)); NcvBool bTexCacheCascade = devProp.major < 2; NcvBool bTexCacheIImg = true; //this works better even on Fermi so far NcvBool bDoAtomicCompaction = devProp.major >= 2 || (devProp.major == 1 && devProp.minor >= 3); NCVVector<Ncv32u> *d_ptrNowData = &d_vecPixelMask; NCVVector<Ncv32u> *d_ptrNowTmp = &d_vecPixelMaskTmp; Ncv32u szNppCompactTmpBuf; nppsStCompactGetSize_32u(static_cast<Ncv32u>(d_vecPixelMask.length()), &szNppCompactTmpBuf, devProp); if (bDoAtomicCompaction) { szNppCompactTmpBuf = 0; } NCVVectorAlloc<Ncv8u> d_tmpBufCompact(gpuAllocator, szNppCompactTmpBuf); NCV_SKIP_COND_BEGIN if (bTexCacheIImg) { hipChannelFormatDesc cfdTexIImage; cfdTexIImage = hipCreateChannelDesc<Ncv32u>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texIImage, integral.ptr(), cfdTexIImage, (anchorsRoi.height + haar.ClassifierSize.height) * integral.pitch()), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); } if (bTexCacheCascade) { hipChannelFormatDesc cfdTexHaarFeatures; hipChannelFormatDesc cfdTexHaarClassifierNodes; cfdTexHaarFeatures = hipCreateChannelDesc<uint2>(); cfdTexHaarClassifierNodes = hipCreateChannelDesc<uint4>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texHaarFeatures, d_HaarFeatures.ptr(), cfdTexHaarFeatures,sizeof(HaarFeature64) * haar.NumFeatures), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texHaarClassifierNodes, d_HaarNodes.ptr(), cfdTexHaarClassifierNodes, sizeof(HaarClassifierNode128) * haar.NumClassifierTotalNodes), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); } Ncv32u stageStartAnchorParallel = 0; Ncv32u stageMiddleSwitch = getStageNumWithNotLessThanNclassifiers(NUM_THREADS_CLASSIFIERPARALLEL, haar, h_HaarStages); Ncv32u stageEndClassifierParallel = haar.NumStages; if (stageMiddleSwitch == 0) { stageMiddleSwitch = 1; } //create stages subdivision for pixel-parallel processing const Ncv32u compactEveryNstage = bDoAtomicCompaction ? 
7 : 1; Ncv32u curStop = stageStartAnchorParallel; std::vector<Ncv32u> pixParallelStageStops; while (curStop < stageMiddleSwitch) { pixParallelStageStops.push_back(curStop); curStop += compactEveryNstage; } if (curStop > compactEveryNstage && curStop - stageMiddleSwitch > compactEveryNstage / 2) { pixParallelStageStops[pixParallelStageStops.size()-1] = (stageMiddleSwitch - (curStop - 2 * compactEveryNstage)) / 2; } pixParallelStageStops.push_back(stageMiddleSwitch); Ncv32u pixParallelStageStopsIndex = 0; if (pixelStep != 1 || bMaskElements) { if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 gridInit((((anchorsRoi.width + pixelStep - 1) / pixelStep + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL), (anchorsRoi.height + pixelStep - 1) / pixelStep); dim3 blockInit(NUM_THREADS_ANCHORSPARALLEL); if (gridInit.x == 0 || gridInit.y == 0) { numDetections = 0; return NCV_SUCCESS; } initializeMaskVectorDynTemplate(bMaskElements, bDoAtomicCompaction, gridInit, blockInit, cuStream, d_ptrNowData->ptr(), d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_pixelMask.stride(), anchorsRoi, pixelStep); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); swap(d_ptrNowData, d_ptrNowTmp); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_ptrNowData->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturn(nppSt == NPPST_SUCCESS, NCV_NPP_ERROR); } numDetections = *hp_numDet; } else { // // 1. Run the first pixel-input pixel-parallel classifier for few stages // if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid1(((d_pixelMask.stride() + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL), anchorsRoi.height); dim3 block1(NUM_THREADS_ANCHORSPARALLEL); applyHaarClassifierAnchorParallelDynTemplate( true, //tbInitMaskPositively bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade pixParallelStageStops[pixParallelStageStopsIndex] != 0,//tbReadPixelIndexFromVector bDoAtomicCompaction, //tbDoAtomicCompaction grid1, block1, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? 
d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), 0, d_pixelMask.stride(), anchorsRoi, pixParallelStageStops[pixParallelStageStopsIndex], pixParallelStageStops[pixParallelStageStopsIndex+1], scaleAreaPixels); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; pixParallelStageStopsIndex++; } // // 2. Run pixel-parallel stages // for (; pixParallelStageStopsIndex < pixParallelStageStops.size()-1; pixParallelStageStopsIndex++) { if (numDetections == 0) { break; } if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid2((numDetections + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL); if (numDetections > MAX_GRID_DIM) { grid2.x = MAX_GRID_DIM; grid2.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM; } dim3 block2(NUM_THREADS_ANCHORSPARALLEL); applyHaarClassifierAnchorParallelDynTemplate( false, //tbInitMaskPositively bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade pixParallelStageStops[pixParallelStageStopsIndex] != 0 || pixelStep != 1 || bMaskElements,//tbReadPixelIndexFromVector bDoAtomicCompaction, //tbDoAtomicCompaction grid2, block2, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), numDetections, d_pixelMask.stride(), anchorsRoi, pixParallelStageStops[pixParallelStageStopsIndex], pixParallelStageStops[pixParallelStageStopsIndex+1], scaleAreaPixels); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections, d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; } // // 3. 
Run all left stages in one stage-parallel kernel // if (numDetections > 0 && stageMiddleSwitch < stageEndClassifierParallel) { if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid3(numDetections); if (numDetections > MAX_GRID_DIM) { grid3.x = MAX_GRID_DIM; grid3.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM; } dim3 block3(NUM_THREADS_CLASSIFIERPARALLEL); applyHaarClassifierClassifierParallelDynTemplate( bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade bDoAtomicCompaction, //tbDoAtomicCompaction grid3, block3, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), numDetections, d_pixelMask.stride(), anchorsRoi, stageMiddleSwitch, stageEndClassifierParallel, scaleAreaPixels); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections, d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; } if (d_ptrNowData != &d_vecPixelMask) { d_vecPixelMaskTmp.copySolid(d_vecPixelMask, cuStream); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } #if defined _SELF_TEST_ ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { std::sort(h_pixelMask_d.ptr, h_pixelMask_d.ptr + numDetections); } Ncv32u fpu_oldcw, fpu_cw; _controlfp_s(&fpu_cw, 0, 0); fpu_oldcw = fpu_cw; _controlfp_s(&fpu_cw, _PC_24, _MCW_PC); Ncv32u numDetGold; ncvStat = ncvApplyHaarClassifierCascade_host(h_integralImage, h_weights, h_pixelMask, numDetGold, haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, bMaskElements, anchorsRoi, pixelStep, scaleArea); ncvAssertReturnNcvStat(ncvStat); _controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC); bool bPass = true; if (numDetGold != numDetections) { printf("NCVHaarClassifierCascade::applyHaarClassifierCascade numdetections don't match: cpu=%d, gpu=%d\n", numDetGold, numDetections); bPass = false; } else { for (Ncv32u i=0; i<::max(numDetGold, numDetections) && bPass; i++) { if (h_pixelMask.ptr[i] != h_pixelMask_d.ptr[i]) { printf("NCVHaarClassifierCascade::applyHaarClassifierCascade self test failed: i=%d, cpu=%d, gpu=%d\n", i, h_pixelMask.ptr[i], h_pixelMask_d.ptr[i]); bPass = false; } } } printf("NCVHaarClassifierCascade::applyHaarClassifierCascade %s\n", bPass?"PASSED":"FAILED"); #endif NCV_SKIP_COND_END return NCV_SUCCESS; } //============================================================================== // // HypothesesOperations file // //============================================================================== const Ncv32u NUM_GROW_THREADS = 128; __device__ __host__ NcvRect32u pixelToRect(Ncv32u pixel, Ncv32u width, 
Ncv32u height, Ncv32f scale) { NcvRect32u res; res.x = (Ncv32u)(scale * (pixel & 0xFFFF)); res.y = (Ncv32u)(scale * (pixel >> 16)); res.width = (Ncv32u)(scale * width); res.height = (Ncv32u)(scale * height); return res; } __global__ void growDetectionsKernel(Ncv32u *pixelMask, Ncv32u numElements, NcvRect32u *hypotheses, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddr = blockId * NUM_GROW_THREADS + threadIdx.x; if (elemAddr >= numElements) { return; } hypotheses[elemAddr] = pixelToRect(pixelMask[elemAddr], rectWidth, rectHeight, curScale); } NCVStatus ncvGrowDetectionsVector_device(NCVVector<Ncv32u> &pixelMask, Ncv32u numPixelMaskDetections, NCVVector<NcvRect32u> &hypotheses, Ncv32u &totalDetections, Ncv32u totalMaxDetections, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale, hipStream_t cuStream) { ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(pixelMask.memType() == hypotheses.memType() && pixelMask.memType() == NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI); ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE); ncvAssertReturn(totalMaxDetections <= hypotheses.length() && numPixelMaskDetections <= pixelMask.length() && totalMaxDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT); NCVStatus ncvStat = NCV_SUCCESS; Ncv32u numDetsToCopy = numPixelMaskDetections; if (numDetsToCopy == 0) { return ncvStat; } if (totalDetections + numPixelMaskDetections > totalMaxDetections) { ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; numDetsToCopy = totalMaxDetections - totalDetections; } dim3 block(NUM_GROW_THREADS); dim3 grid((numDetsToCopy + NUM_GROW_THREADS - 1) / NUM_GROW_THREADS); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } hipLaunchKernelGGL(( growDetectionsKernel), dim3(grid), dim3(block), 0, cuStream, pixelMask.ptr(), numDetsToCopy, hypotheses.ptr() + totalDetections, rectWidth, rectHeight, curScale); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); totalDetections += numDetsToCopy; return ncvStat; } //============================================================================== // // Pipeline file // //============================================================================== NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg, NcvSize32u srcRoi, NCVVector<NcvRect32u> &d_dstRects, Ncv32u &dstNumRects, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarStage64> &d_HaarStages, NCVVector<HaarClassifierNode128> &d_HaarNodes, NCVVector<HaarFeature64> &d_HaarFeatures, NcvSize32u minObjSize, Ncv32u minNeighbors, //default 4 Ncv32f scaleStep, //default 1.2f Ncv32u pixelStep, //default 1 Ncv32u flags, //default NCVPipeObjDet_Default INCVMemAllocator &gpuAllocator, INCVMemAllocator &cpuAllocator, hipDeviceProp_t &devProp, hipStream_t cuStream) { ncvAssertReturn(d_srcImg.memType() == d_dstRects.memType() && d_srcImg.memType() == gpuAllocator.memType() && (d_srcImg.memType() == NCVMemoryTypeDevice || d_srcImg.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() && d_HaarStages.memType() == d_HaarFeatures.memType() && (d_HaarStages.memType() == NCVMemoryTypeDevice || d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, 
NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED); ncvAssertReturn((d_srcImg.ptr() != NULL && d_dstRects.ptr() != NULL && h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL && d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0 && d_srcImg.width() >= srcRoi.width && d_srcImg.height() >= srcRoi.height && srcRoi.width >= minObjSize.width && srcRoi.height >= minObjSize.height && d_dstRects.length() >= 1, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleStep > 1.0f, NCV_INVALID_SCALE); ncvAssertReturn(d_HaarStages.length() >= haar.NumStages && d_HaarNodes.length() >= haar.NumClassifierTotalNodes && d_HaarFeatures.length() >= haar.NumFeatures && d_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); //TODO: set NPP active stream to cuStream NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); Ncv32u integralWidth = d_srcImg.width() + 1; Ncv32u integralHeight = d_srcImg.height() + 1; NCVMatrixAlloc<Ncv32u> integral(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(integral.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32f> d_rectStdDev(gpuAllocator, d_srcImg.width(), d_srcImg.height()); ncvAssertReturn(d_rectStdDev.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> d_pixelMask(gpuAllocator, d_srcImg.width(), d_srcImg.height()); ncvAssertReturn(d_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> d_scaledIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_scaledIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv64u> d_scaledSqIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_scaledSqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<NcvRect32u> d_hypothesesIntermediate(gpuAllocator, d_srcImg.width() * d_srcImg.height()); ncvAssertReturn(d_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<NcvRect32u> h_hypothesesIntermediate(cpuAllocator, d_srcImg.width() * d_srcImg.height()); ncvAssertReturn(h_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVStatus nppStat; Ncv32u szTmpBufIntegral, szTmpBufSqIntegral; nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufIntegral, devProp); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufSqIntegral, devProp); ncvAssertReturnNcvStat(nppStat); NCVVectorAlloc<Ncv8u> d_tmpIIbuf(gpuAllocator, ::max(szTmpBufIntegral, szTmpBufSqIntegral)); ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCV_SKIP_COND_BEGIN nppStat = nppiStIntegral_8u32u_C1R(d_srcImg.ptr(), d_srcImg.pitch(), integral.ptr(), integral.pitch(), NcvSize32u(d_srcImg.width(), d_srcImg.height()), d_tmpIIbuf.ptr(), szTmpBufIntegral, devProp); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStSqrIntegral_8u64u_C1R(d_srcImg.ptr(), d_srcImg.pitch(), 
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(), NcvSize32u(d_srcImg.width(), d_srcImg.height()), d_tmpIIbuf.ptr(), szTmpBufSqIntegral, devProp); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_END dstNumRects = 0; Ncv32u lastCheckedScale = 0; NcvBool bReverseTraverseScale = ((flags & NCVPipeObjDet_FindLargestObject) != 0); std::vector<Ncv32u> scalesVector; NcvBool bFoundLargestFace = false; for (Ncv32f scaleIter = 1.0f; ; scaleIter *= scaleStep) { Ncv32u scale = (Ncv32u)scaleIter; if (lastCheckedScale == scale) { continue; } lastCheckedScale = scale; if (haar.ClassifierSize.width * (Ncv32s)scale < minObjSize.width || haar.ClassifierSize.height * (Ncv32s)scale < minObjSize.height) { continue; } NcvSize32s srcRoi_, srcIIRo_i, scaledIIRoi, searchRoi; srcRoi_.width = d_srcImg.width(); srcRoi_.height = d_srcImg.height(); srcIIRo_i.width = srcRoi_.width + 1; srcIIRo_i.height = srcRoi_.height + 1; scaledIIRoi.width = srcIIRo_i.width / scale; scaledIIRoi.height = srcIIRo_i.height / scale; searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width; searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height; if (searchRoi.width <= 0 || searchRoi.height <= 0) { break; } scalesVector.push_back(scale); if (gpuAllocator.isCounting()) { break; } } if (bReverseTraverseScale) { std::reverse(scalesVector.begin(), scalesVector.end()); } //TODO: handle _fair_scale_ flag for (Ncv32u i=0; i<scalesVector.size(); i++) { Ncv32u scale = scalesVector[i]; NcvSize32u srcRoi_, scaledIIRoi, searchRoi; NcvSize32u srcIIRoi; srcRoi_.width = d_srcImg.width(); srcRoi_.height = d_srcImg.height(); srcIIRoi.width = srcRoi_.width + 1; srcIIRoi.height = srcRoi_.height + 1; scaledIIRoi.width = srcIIRoi.width / scale; scaledIIRoi.height = srcIIRoi.height / scale; searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width; searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height; NCV_SKIP_COND_BEGIN nppStat = nppiStDecimate_32u_C1R( integral.ptr(), integral.pitch(), d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(), srcIIRoi, scale, true); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStDecimate_64u_C1R( d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(), d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(), srcIIRoi, scale, true); ncvAssertReturnNcvStat(nppStat); const NcvRect32u rect( HAAR_STDDEV_BORDER, HAAR_STDDEV_BORDER, haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER, haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER); nppStat = nppiStRectStdDev_32f_C1R( d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(), d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(), d_rectStdDev.ptr(), d_rectStdDev.pitch(), NcvSize32u(searchRoi.width, searchRoi.height), rect, (Ncv32f)scale*scale, true); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_END Ncv32u detectionsOnThisScale; ncvStat = ncvApplyHaarClassifierCascade_device( d_scaledIntegralImage, d_rectStdDev, d_pixelMask, detectionsOnThisScale, haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false, searchRoi, pixelStep, (Ncv32f)scale*scale, gpuAllocator, cpuAllocator, devProp, cuStream); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_BEGIN NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment()); ncvStat = ncvGrowDetectionsVector_device( d_vecPixelMask, detectionsOnThisScale, d_hypothesesIntermediate, dstNumRects, static_cast<Ncv32u>(d_hypothesesIntermediate.length()), haar.ClassifierSize.width, haar.ClassifierSize.height, (Ncv32f)scale, cuStream); ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat); if 
(flags & NCVPipeObjDet_FindLargestObject) { if (dstNumRects == 0) { continue; } if (dstNumRects != 0) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } Ncv32u numStrongHypothesesNow = dstNumRects; ncvStat = ncvGroupRectangles_host( h_hypothesesIntermediate, numStrongHypothesesNow, minNeighbors, RECT_SIMILARITY_PROPORTION, NULL); ncvAssertReturnNcvStat(ncvStat); if (numStrongHypothesesNow > 0) { NcvRect32u maxRect = h_hypothesesIntermediate.ptr()[0]; for (Ncv32u j=1; j<numStrongHypothesesNow; j++) { if (maxRect.width < h_hypothesesIntermediate.ptr()[j].width) { maxRect = h_hypothesesIntermediate.ptr()[j]; } } h_hypothesesIntermediate.ptr()[0] = maxRect; dstNumRects = 1; ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); bFoundLargestFace = true; break; } } NCV_SKIP_COND_END if (gpuAllocator.isCounting()) { break; } } NCVStatus ncvRetCode = NCV_SUCCESS; NCV_SKIP_COND_BEGIN if (flags & NCVPipeObjDet_FindLargestObject) { if (!bFoundLargestFace) { dstNumRects = 0; } } else { //TODO: move hypotheses filtration to GPU pipeline (the only CPU-resident element of the pipeline left) if (dstNumRects != 0) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } ncvStat = ncvGroupRectangles_host( h_hypothesesIntermediate, dstNumRects, minNeighbors, RECT_SIMILARITY_PROPORTION, NULL); ncvAssertReturnNcvStat(ncvStat); if (dstNumRects > d_dstRects.length()) { ncvRetCode = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; dstNumRects = static_cast<Ncv32u>(d_dstRects.length()); } if (dstNumRects != 0) { ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); } } if (flags & NCVPipeObjDet_VisualizeInPlace) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvDrawRects_8u_device(d_srcImg.ptr(), d_srcImg.stride(), d_srcImg.width(), d_srcImg.height(), d_dstRects.ptr(), dstNumRects, 255, cuStream); } NCV_SKIP_COND_END return ncvRetCode; } //============================================================================== // // Purely Host code: classifier IO, mock-ups // //============================================================================== #ifdef _SELF_TEST_ #include <float.h> #endif NCVStatus ncvApplyHaarClassifierCascade_host(NCVMatrix<Ncv32u> &h_integralImage, NCVMatrix<Ncv32f> &h_weights, NCVMatrixAlloc<Ncv32u> &h_pixelMask, Ncv32u &numDetections, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures, NcvBool bMaskElements, NcvSize32u anchorsRoi, Ncv32u pixelStep, Ncv32f scaleArea) { ncvAssertReturn(h_integralImage.memType() == h_weights.memType() && h_integralImage.memType() == h_pixelMask.memType() && (h_integralImage.memType() == NCVMemoryTypeHostPageable || h_integralImage.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() == h_HaarNodes.memType() && h_HaarStages.memType() == h_HaarFeatures.memType() && 
(h_HaarStages.memType() == NCVMemoryTypeHostPageable || h_HaarStages.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_integralImage.ptr() != NULL && h_weights.ptr() != NULL && h_pixelMask.ptr() != NULL && h_HaarStages.ptr() != NULL && h_HaarNodes.ptr() != NULL && h_HaarFeatures.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 && h_pixelMask.width() >= anchorsRoi.width && h_pixelMask.height() >= anchorsRoi.height && h_weights.width() >= anchorsRoi.width && h_weights.height() >= anchorsRoi.height && h_integralImage.width() >= anchorsRoi.width + haar.ClassifierSize.width && h_integralImage.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE); ncvAssertReturn(h_HaarStages.length() >= haar.NumStages && h_HaarNodes.length() >= haar.NumClassifierTotalNodes && h_HaarFeatures.length() >= haar.NumFeatures && h_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) * (haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER)); for (Ncv32u i=0; i<anchorsRoi.height; i++) { for (Ncv32u j=0; j<h_pixelMask.stride(); j++) { if (i % pixelStep != 0 || j % pixelStep != 0 || j >= anchorsRoi.width) { h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U; } else { for (Ncv32u iStage = 0; iStage < haar.NumStages; iStage++) { Ncv32f curStageSum = 0.0f; Ncv32u numRootNodesInStage = h_HaarStages.ptr()[iStage].getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = h_HaarStages.ptr()[iStage].getStartClassifierRootNodeOffset(); if (iStage == 0) { if (bMaskElements && h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } else { h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = ((i << 16) | j); } } else if (h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } while (numRootNodesInStage--) { NcvBool bMoreNodesToTraverse = true; Ncv32u curNodeOffset = curRootNodeOffset; while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = h_HaarNodes.ptr()[curNodeOffset]; HaarFeatureDescriptor32 curFeatDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = curFeatDesc.getNumFeatures(); Ncv32u curNodeFeaturesOffs = curFeatDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.f; for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { HaarFeature64 feature = h_HaarFeatures.ptr()[curNodeFeaturesOffs + iRect]; Ncv32u rectX, rectY, rectWidth, rectHeight; feature.getRect(&rectX, &rectY, &rectWidth, &rectHeight); Ncv32f rectWeight = feature.getWeight(); Ncv32u iioffsTL = (i + rectY) * h_integralImage.stride() + (j + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * h_integralImage.stride(); Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u iivalTL = h_integralImage.ptr()[iioffsTL]; Ncv32u iivalTR = h_integralImage.ptr()[iioffsTR]; Ncv32u iivalBL = h_integralImage.ptr()[iioffsBL]; Ncv32u iivalBR = h_integralImage.ptr()[iioffsBR]; Ncv32u rectSum = iivalBR - iivalBL + iivalTL - iivalTR; curNodeVal += (Ncv32f)rectSum * rectWeight; } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); 
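// rectSum above is the classic integral-image box sum, II[BR] - II[BL] + II[TL] - II[TR], so every
// Haar rectangle costs four lookups regardless of its size. The accumulated curNodeVal is compared
// below against the node threshold scaled by the per-window standard deviation (h_weights) and the
// area normalization (scaleAreaPixels); the comparison routes traversal to the left or right child,
// or stops at a leaf whose value is added to the running stage sum.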
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleAreaPixels * h_weights.ptr()[i * h_weights.stride() + j] * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = curFeatDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = curFeatDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValueHost(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { curNodeOffset = nextNodeDescriptor.getNextNodeOffset(); } } curRootNodeOffset++; } Ncv32f tmpStageThreshold = h_HaarStages.ptr()[iStage].getStageThreshold(); if (curStageSum < tmpStageThreshold) { //drop h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U; break; } } } } } std::sort(h_pixelMask.ptr(), h_pixelMask.ptr() + anchorsRoi.height * h_pixelMask.stride()); Ncv32u i = 0; for (; i<anchorsRoi.height * h_pixelMask.stride(); i++) { if (h_pixelMask.ptr()[i] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } } numDetections = i; return NCV_SUCCESS; } NCVStatus ncvGrowDetectionsVector_host(NCVVector<Ncv32u> &pixelMask, Ncv32u numPixelMaskDetections, NCVVector<NcvRect32u> &hypotheses, Ncv32u &totalDetections, Ncv32u totalMaxDetections, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale) { ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(pixelMask.memType() == hypotheses.memType() && pixelMask.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI); ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE); ncvAssertReturn(totalMaxDetections <= hypotheses.length() && numPixelMaskDetections <= pixelMask.length() && totalMaxDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT); NCVStatus ncvStat = NCV_SUCCESS; Ncv32u numDetsToCopy = numPixelMaskDetections; if (numDetsToCopy == 0) { return ncvStat; } if (totalDetections + numPixelMaskDetections > totalMaxDetections) { ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; numDetsToCopy = totalMaxDetections - totalDetections; } for (Ncv32u i=0; i<numDetsToCopy; i++) { hypotheses.ptr()[totalDetections + i] = pixelToRect(pixelMask.ptr()[i], rectWidth, rectHeight, curScale); } totalDetections += numDetsToCopy; return ncvStat; } NCVStatus loadFromXML(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, std::vector<HaarStage64> &haarStages, std::vector<HaarClassifierNode128> &haarClassifierNodes, std::vector<HaarFeature64> &haarFeatures); #define NVBIN_HAAR_SIZERESERVED 16 #define NVBIN_HAAR_VERSION 0x1 static NCVStatus loadFromNVBIN(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, std::vector<HaarStage64> &haarStages, std::vector<HaarClassifierNode128> &haarClassifierNodes, std::vector<HaarFeature64> &haarFeatures) { size_t readCount; FILE *fp = fopen(filename.c_str(), "rb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); Ncv32u fileVersion; readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR); Ncv32u fsize; readCount = fread(&fsize, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fseek(fp, 0, SEEK_END); Ncv32u fsizeActual = ftell(fp); ncvAssertReturn(fsize == fsizeActual, NCV_FILE_ERROR); std::vector<unsigned char> fdata; 
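// NVBIN layout, as parsed below and mirrored by ncvHaarStoreNVBIN_host:
//   bytes [0 .. NVBIN_HAAR_SIZERESERVED) - reserved header holding the file version and total file size (Ncv32u each);
//   then NumStages, NumClassifierRootNodes, NumClassifierTotalNodes, NumFeatures (Ncv32u each),
//   ClassifierSize (NcvSize32u), bNeedsTiltedII and bHasStumpsOnly (NcvBool each);
//   finally the flat HaarStage64, HaarClassifierNode128 and HaarFeature64 arrays, in that order.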
fdata.resize(fsize); Ncv32u dataOffset = 0; fseek(fp, 0, SEEK_SET); readCount = fread(&fdata[0], fsize, 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fclose(fp); //data dataOffset = NVBIN_HAAR_SIZERESERVED; haar.NumStages = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumClassifierRootNodes = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumClassifierTotalNodes = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumFeatures = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.ClassifierSize = *(NcvSize32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvSize32u); haar.bNeedsTiltedII = *(NcvBool *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvBool); haar.bHasStumpsOnly = *(NcvBool *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvBool); haarStages.resize(haar.NumStages); haarClassifierNodes.resize(haar.NumClassifierTotalNodes); haarFeatures.resize(haar.NumFeatures); Ncv32u szStages = haar.NumStages * sizeof(HaarStage64); Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128); Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64); memcpy(&haarStages[0], &fdata[0]+dataOffset, szStages); dataOffset += szStages; memcpy(&haarClassifierNodes[0], &fdata[0]+dataOffset, szClassifiers); dataOffset += szClassifiers; memcpy(&haarFeatures[0], &fdata[0]+dataOffset, szFeatures); dataOffset += szFeatures; return NCV_SUCCESS; } NCVStatus ncvHaarGetClassifierSize(const cv::String &filename, Ncv32u &numStages, Ncv32u &numNodes, Ncv32u &numFeatures) { size_t readCount; NCVStatus ncvStat; cv::String fext = filename.substr(filename.find_last_of(".") + 1); fext = fext.toLowerCase(); if (fext == "nvbin") { FILE *fp = fopen(filename.c_str(), "rb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); Ncv32u fileVersion; readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR); fseek(fp, NVBIN_HAAR_SIZERESERVED, SEEK_SET); Ncv32u tmp; readCount = fread(&numStages, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&tmp, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&numNodes, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&numFeatures, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fclose(fp); } else if (fext == "xml") { HaarClassifierCascadeDescriptor haar; std::vector<HaarStage64> haarStages; std::vector<HaarClassifierNode128> haarNodes; std::vector<HaarFeature64> haarFeatures; ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); numStages = haar.NumStages; numNodes = haar.NumClassifierTotalNodes; numFeatures = haar.NumFeatures; } else { return NCV_HAAR_XML_LOADING_EXCEPTION; } return NCV_SUCCESS; } NCVStatus ncvHaarLoadFromFile_host(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures) { ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned && h_HaarNodes.memType() == NCVMemoryTypeHostPinned && h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR); NCVStatus ncvStat; cv::String fext = filename.substr(filename.find_last_of(".") + 1); fext = fext.toLowerCase(); std::vector<HaarStage64> 
haarStages; std::vector<HaarClassifierNode128> haarNodes; std::vector<HaarFeature64> haarFeatures; if (fext == "nvbin") { ncvStat = loadFromNVBIN(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); } else if (fext == "xml") { ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); } else { return NCV_HAAR_XML_LOADING_EXCEPTION; } ncvAssertReturn(h_HaarStages.length() >= haarStages.size(), NCV_MEM_INSUFFICIENT_CAPACITY); ncvAssertReturn(h_HaarNodes.length() >= haarNodes.size(), NCV_MEM_INSUFFICIENT_CAPACITY); ncvAssertReturn(h_HaarFeatures.length() >= haarFeatures.size(), NCV_MEM_INSUFFICIENT_CAPACITY); memcpy(h_HaarStages.ptr(), &haarStages[0], haarStages.size()*sizeof(HaarStage64)); memcpy(h_HaarNodes.ptr(), &haarNodes[0], haarNodes.size()*sizeof(HaarClassifierNode128)); memcpy(h_HaarFeatures.ptr(), &haarFeatures[0], haarFeatures.size()*sizeof(HaarFeature64)); return NCV_SUCCESS; } NCVStatus ncvHaarStoreNVBIN_host(const cv::String &filename, HaarClassifierCascadeDescriptor haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures) { ncvAssertReturn(h_HaarStages.length() >= haar.NumStages, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarNodes.length() >= haar.NumClassifierTotalNodes, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarFeatures.length() >= haar.NumFeatures, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned && h_HaarNodes.memType() == NCVMemoryTypeHostPinned && h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR); Ncv32u szStages = haar.NumStages * sizeof(HaarStage64); Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128); Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64); Ncv32u dataOffset = 0; std::vector<unsigned char> fdata; fdata.resize(szStages+szClassifiers+szFeatures+1024, 0); //header *(Ncv32u *)(&fdata[0]+dataOffset) = NVBIN_HAAR_VERSION; //data dataOffset = NVBIN_HAAR_SIZERESERVED; *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumStages; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierRootNodes; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierTotalNodes; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumFeatures; dataOffset += sizeof(Ncv32u); *(NcvSize32u *)(&fdata[0]+dataOffset) = haar.ClassifierSize; dataOffset += sizeof(NcvSize32u); *(NcvBool *)(&fdata[0]+dataOffset) = haar.bNeedsTiltedII; dataOffset += sizeof(NcvBool); *(NcvBool *)(&fdata[0]+dataOffset) = haar.bHasStumpsOnly; dataOffset += sizeof(NcvBool); memcpy(&fdata[0]+dataOffset, h_HaarStages.ptr(), szStages); dataOffset += szStages; memcpy(&fdata[0]+dataOffset, h_HaarNodes.ptr(), szClassifiers); dataOffset += szClassifiers; memcpy(&fdata[0]+dataOffset, h_HaarFeatures.ptr(), szFeatures); dataOffset += szFeatures; Ncv32u fsize = dataOffset; //TODO: CRC32 here //update header dataOffset = sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = fsize; FILE *fp = fopen(filename.c_str(), "wb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); fwrite(&fdata[0], fsize, 1, fp); fclose(fp); return NCV_SUCCESS; } #endif /* CUDA_DISABLER */
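// A standalone host-side sketch (not part of the OpenCV/NCV sources) of the mask-word convention used
// above: surviving search-window anchors are packed as (y << 16) | x, rejected anchors become the
// invalid marker (OBJDET_MASK_ELEMENT_INVALID_32U in the real code), survivors are compacted
// (nppsStCompact_32u or atomic compaction on the GPU), and each packed word is finally expanded to a
// scaled rectangle exactly as pixelToRect() does on the device. The names packAnchor, unpackAnchor,
// compactSurvivors and kInvalidMaskElement are illustrative only and do not appear in the sources.
#include <cstdint>
#include <cstdio>
#include <vector>

static const uint32_t kInvalidMaskElement = 0xFFFFFFFFu; // stand-in for the invalid-mask marker

struct Rect32u { uint32_t x, y, width, height; };

// Pack an anchor position the way the kernels do: high 16 bits = y, low 16 bits = x.
static uint32_t packAnchor(uint32_t x, uint32_t y) { return (y << 16) | x; }

// Inverse of packAnchor plus the scale expansion performed by pixelToRect().
static Rect32u unpackAnchor(uint32_t pixel, uint32_t rectWidth, uint32_t rectHeight, float scale)
{
    Rect32u r;
    r.x      = (uint32_t)(scale * (pixel & 0xFFFF));
    r.y      = (uint32_t)(scale * (pixel >> 16));
    r.width  = (uint32_t)(scale * rectWidth);
    r.height = (uint32_t)(scale * rectHeight);
    return r;
}

// Host stand-in for the stream-compaction step: keep only the anchors that passed the cascade.
static std::vector<uint32_t> compactSurvivors(const std::vector<uint32_t> &mask)
{
    std::vector<uint32_t> out;
    for (uint32_t v : mask)
        if (v != kInvalidMaskElement)
            out.push_back(v);
    return out;
}

int main()
{
    // Three anchors; the middle one was rejected by some cascade stage.
    std::vector<uint32_t> mask = { packAnchor(12, 7), kInvalidMaskElement, packAnchor(40, 33) };
    for (uint32_t p : compactSurvivors(mask))
    {
        Rect32u r = unpackAnchor(p, 20, 20, 2.0f); // 20x20 classifier window evaluated at scale 2
        std::printf("detection: x=%u y=%u w=%u h=%u\n", r.x, r.y, r.width, r.height);
    }
    return 0;
}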
88723c3a51e51e8e4d60ad46d61d53cffa119fba.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ //////////////////////////////////////////////////////////////////////////////// // // NVIDIA CUDA implementation of Viola-Jones Object Detection Framework // // The algorithm and code are explained in the upcoming GPU Computing Gems // chapter in detail: // // Anton Obukhov, "Haar Classifiers for Object Detection with CUDA" // PDF URL placeholder // email: [email protected], [email protected] // // Credits for help with the code to: // Alexey Mendelenko, Cyril Crassin, and Mikhail Smirnov. 
// //////////////////////////////////////////////////////////////////////////////// #if !defined CUDA_DISABLER #include <algorithm> #include <cstdio> #include "NCV.hpp" #include "NCVAlg.hpp" #include "NPP_staging/NPP_staging.hpp" #include "NCVRuntimeTemplates.hpp" #include "NCVHaarObjectDetection.hpp" #include "opencv2/core/cuda/warp.hpp" #include "opencv2/core/cuda/warp_shuffle.hpp" //============================================================================== // // BlockScan file // //============================================================================== NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive //Almost the same as naive scan1Inclusive, but doesn't need __syncthreads() //assuming size <= WARP_SIZE and size is power of 2 __device__ Ncv32u warpScanInclusive(Ncv32u idata, volatile Ncv32u *s_Data) { #if __CUDA_ARCH__ >= 300 const unsigned int laneId = cv::gpu::cudev::Warp::laneId(); // scan on shuffl functions #pragma unroll for (int i = 1; i <= (K_WARP_SIZE / 2); i *= 2) { const Ncv32u n = cv::gpu::cudev::shfl_up(idata, i); if (laneId >= i) idata += n; } return idata; #else Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1)); s_Data[pos] = 0; pos += K_WARP_SIZE; s_Data[pos] = idata; s_Data[pos] += s_Data[pos - 1]; s_Data[pos] += s_Data[pos - 2]; s_Data[pos] += s_Data[pos - 4]; s_Data[pos] += s_Data[pos - 8]; s_Data[pos] += s_Data[pos - 16]; return s_Data[pos]; #endif } __device__ __forceinline__ Ncv32u warpScanExclusive(Ncv32u idata, volatile Ncv32u *s_Data) { return warpScanInclusive(idata, s_Data) - idata; } template <Ncv32u tiNumScanThreads> __device__ Ncv32u scan1Inclusive(Ncv32u idata, volatile Ncv32u *s_Data) { if (tiNumScanThreads > K_WARP_SIZE) { //Bottom-level inclusive warp scan Ncv32u warpResult = warpScanInclusive(idata, s_Data); //Save top elements of each warp for exclusive warp scan //sync to wait for warp scans to complete (because s_Data is being overwritten) __syncthreads(); if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) ) { s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult; } //wait for warp scans to complete __syncthreads(); if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) ) { //grab top warp elements Ncv32u val = s_Data[threadIdx.x]; //calculate exclusive scan and write back to shared memory s_Data[threadIdx.x] = warpScanExclusive(val, s_Data); } //return updated warp scans with exclusive scan results __syncthreads(); return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE]; } else { return warpScanInclusive(idata, s_Data); } } //============================================================================== // // HaarClassifierCascade file // //============================================================================== const Ncv32u MAX_GRID_DIM = 65535; const Ncv32u NUM_THREADS_ANCHORSPARALLEL = 64; #define NUM_THREADS_CLASSIFIERPARALLEL_LOG2 6 #define NUM_THREADS_CLASSIFIERPARALLEL (1 << NUM_THREADS_CLASSIFIERPARALLEL_LOG2) /** \internal * Haar features solid array. */ texture<uint2, 1, cudaReadModeElementType> texHaarFeatures; /** \internal * Haar classifiers flattened trees container. * Two parts: first contains root nodes, second - nodes that are referred by root nodes. 
* Drawback: breaks tree locality (might cause more cache misses * Advantage: No need to introduce additional 32-bit field to index root nodes offsets */ texture<uint4, 1, cudaReadModeElementType> texHaarClassifierNodes; texture<Ncv32u, 1, cudaReadModeElementType> texIImage; __device__ HaarStage64 getStage(Ncv32u iStage, HaarStage64 *d_Stages) { return d_Stages[iStage]; } template <NcvBool tbCacheTextureCascade> __device__ HaarClassifierNode128 getClassifierNode(Ncv32u iNode, HaarClassifierNode128 *d_ClassifierNodes) { HaarClassifierNode128 tmpNode; if (tbCacheTextureCascade) { tmpNode._ui4 = tex1Dfetch(texHaarClassifierNodes, iNode); } else { tmpNode = d_ClassifierNodes[iNode]; } return tmpNode; } template <NcvBool tbCacheTextureCascade> __device__ void getFeature(Ncv32u iFeature, HaarFeature64 *d_Features, Ncv32f *weight, Ncv32u *rectX, Ncv32u *rectY, Ncv32u *rectWidth, Ncv32u *rectHeight) { HaarFeature64 feature; if (tbCacheTextureCascade) { feature._ui2 = tex1Dfetch(texHaarFeatures, iFeature); } else { feature = d_Features[iFeature]; } feature.getRect(rectX, rectY, rectWidth, rectHeight); *weight = feature.getWeight(); } template <NcvBool tbCacheTextureIImg> __device__ Ncv32u getElemIImg(Ncv32u x, Ncv32u *d_IImg) { if (tbCacheTextureIImg) { return tex1Dfetch(texIImage, x); } else { return d_IImg[x]; } } __device__ Ncv32u d_outMaskPosition; __device__ void compactBlockWriteOutAnchorParallel(Ncv32u threadPassFlag, Ncv32u threadElem, Ncv32u *vectorOut) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 __shared__ Ncv32u shmem[NUM_THREADS_ANCHORSPARALLEL * 2]; __shared__ Ncv32u numPassed; __shared__ Ncv32u outMaskOffset; Ncv32u incScan = scan1Inclusive<NUM_THREADS_ANCHORSPARALLEL>(threadPassFlag, shmem); __syncthreads(); if (threadIdx.x == NUM_THREADS_ANCHORSPARALLEL-1) { numPassed = incScan; outMaskOffset = atomicAdd(&d_outMaskPosition, incScan); } if (threadPassFlag) { Ncv32u excScan = incScan - threadPassFlag; shmem[excScan] = threadElem; } __syncthreads(); if (threadIdx.x < numPassed) { vectorOut[outMaskOffset + threadIdx.x] = shmem[threadIdx.x]; } #endif } template <NcvBool tbInitMaskPositively, NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbReadPixelIndexFromVector, NcvBool tbDoAtomicCompaction> __global__ void applyHaarClassifierAnchorParallel(Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { Ncv32u y_offs; Ncv32u x_offs; Ncv32u maskOffset; Ncv32u outMaskVal; NcvBool bInactiveThread = false; if (tbReadPixelIndexFromVector) { maskOffset = (MAX_GRID_DIM * blockIdx.y + blockIdx.x) * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; if (maskOffset >= mask1Dlen) { if (tbDoAtomicCompaction) bInactiveThread = true; else return; } if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread) { outMaskVal = d_inMask[maskOffset]; y_offs = outMaskVal >> 16; x_offs = outMaskVal & 0xFFFF; } } else { y_offs = blockIdx.y; x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; if (x_offs >= mask2Dstride) { if (tbDoAtomicCompaction) bInactiveThread = true; else return; } if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread) { maskOffset = y_offs * mask2Dstride + x_offs; if ((x_offs >= anchorsRoi.width) || (!tbInitMaskPositively && d_inMask != d_outMask && d_inMask[maskOffset] == 
OBJDET_MASK_ELEMENT_INVALID_32U)) { if (tbDoAtomicCompaction) { bInactiveThread = true; } else { d_outMask[maskOffset] = OBJDET_MASK_ELEMENT_INVALID_32U; return; } } outMaskVal = (y_offs << 16) | x_offs; } } NcvBool bPass = true; if (!tbDoAtomicCompaction || tbDoAtomicCompaction) { Ncv32f pixelStdDev = 0.0f; if (!bInactiveThread) pixelStdDev = d_weights[y_offs * weightsStride + x_offs]; for (Ncv32u iStage = startStageInc; iStage < endStageExc; iStage++) { Ncv32f curStageSum = 0.0f; HaarStage64 curStage = getStage(iStage, d_Stages); Ncv32u numRootNodesInStage = curStage.getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset(); Ncv32f stageThreshold = curStage.getStageThreshold(); while (numRootNodesInStage--) { NcvBool bMoreNodesToTraverse = true; Ncv32u iNode = curRootNodeOffset; if (bPass && !bInactiveThread) { while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes); HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures(); Ncv32u iFeature = featuresDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.0f; for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { Ncv32f rectWeight; Ncv32u rectX, rectY, rectWidth, rectHeight; getFeature<tbCacheTextureCascade> (iFeature + iRect, d_Features, &rectWeight, &rectX, &rectY, &rectWidth, &rectHeight); Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride; Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) + getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg); #if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight); #else curNodeVal += (Ncv32f)rectSum * rectWeight; #endif } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = featuresDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { iNode = nextNodeDescriptor.getNextNodeOffset(); } } } __syncthreads(); curRootNodeOffset++; } if (curStageSum < stageThreshold) { bPass = false; outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U; } } } __syncthreads(); if (!tbDoAtomicCompaction) { if (!tbReadPixelIndexFromVector || (tbReadPixelIndexFromVector && (!bPass || d_inMask != d_outMask))) { d_outMask[maskOffset] = outMaskVal; } } else { compactBlockWriteOutAnchorParallel(bPass && !bInactiveThread, outMaskVal, d_outMask); } } template <NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbDoAtomicCompaction> __global__ void applyHaarClassifierClassifierParallel(Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u 
mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { Ncv32u maskOffset = MAX_GRID_DIM * blockIdx.y + blockIdx.x; if (maskOffset >= mask1Dlen) { return; } Ncv32u outMaskVal = d_inMask[maskOffset]; Ncv32u y_offs = outMaskVal >> 16; Ncv32u x_offs = outMaskVal & 0xFFFF; Ncv32f pixelStdDev = d_weights[y_offs * weightsStride + x_offs]; NcvBool bPass = true; for (Ncv32u iStage = startStageInc; iStage<endStageExc; iStage++) { //this variable is subject to reduction Ncv32f curStageSum = 0.0f; HaarStage64 curStage = getStage(iStage, d_Stages); Ncv32s numRootNodesInStage = curStage.getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset() + threadIdx.x; Ncv32f stageThreshold = curStage.getStageThreshold(); Ncv32u numRootChunks = (numRootNodesInStage + NUM_THREADS_CLASSIFIERPARALLEL - 1) >> NUM_THREADS_CLASSIFIERPARALLEL_LOG2; for (Ncv32u chunkId=0; chunkId<numRootChunks; chunkId++) { NcvBool bMoreNodesToTraverse = true; if (chunkId * NUM_THREADS_CLASSIFIERPARALLEL + threadIdx.x < numRootNodesInStage) { Ncv32u iNode = curRootNodeOffset; while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes); HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures(); Ncv32u iFeature = featuresDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.0f; //TODO: fetch into shmem if size suffices. Shmem can be shared with reduce for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { Ncv32f rectWeight; Ncv32u rectX, rectY, rectWidth, rectHeight; getFeature<tbCacheTextureCascade> (iFeature + iRect, d_Features, &rectWeight, &rectX, &rectY, &rectWidth, &rectHeight); Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride; Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) + getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg); #if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight); #else curNodeVal += (Ncv32f)rectSum * rectWeight; #endif } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = featuresDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { iNode = nextNodeDescriptor.getNextNodeOffset(); } } } __syncthreads(); curRootNodeOffset += NUM_THREADS_CLASSIFIERPARALLEL; } Ncv32f finalStageSum = subReduce<Ncv32f, functorAddValues<Ncv32f>, NUM_THREADS_CLASSIFIERPARALLEL>(curStageSum); if (finalStageSum < stageThreshold) { bPass = false; outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U; break; } } if (!tbDoAtomicCompaction) { if (!bPass || d_inMask != d_outMask) { if (!threadIdx.x) { d_outMask[maskOffset] = outMaskVal; } } } else { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 if 
(bPass && !threadIdx.x) { Ncv32u outMaskOffset = atomicAdd(&d_outMaskPosition, 1); d_outMask[outMaskOffset] = outMaskVal; } #endif } } template <NcvBool tbMaskByInmask, NcvBool tbDoAtomicCompaction> __global__ void initializeMaskVector(Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u step) { Ncv32u y_offs = blockIdx.y; Ncv32u x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; Ncv32u outMaskOffset = y_offs * gridDim.x * blockDim.x + x_offs; Ncv32u y_offs_upsc = step * y_offs; Ncv32u x_offs_upsc = step * x_offs; Ncv32u inMaskOffset = y_offs_upsc * mask2Dstride + x_offs_upsc; Ncv32u outElem = OBJDET_MASK_ELEMENT_INVALID_32U; if (x_offs_upsc < anchorsRoi.width && (!tbMaskByInmask || d_inMask[inMaskOffset] != OBJDET_MASK_ELEMENT_INVALID_32U)) { outElem = (y_offs_upsc << 16) | x_offs_upsc; } if (!tbDoAtomicCompaction) { d_outMask[outMaskOffset] = outElem; } else { compactBlockWriteOutAnchorParallel(outElem != OBJDET_MASK_ELEMENT_INVALID_32U, outElem, d_outMask); } } struct applyHaarClassifierAnchorParallelFunctor { dim3 gridConf, blockConf; cudaStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_IImg; Ncv32u IImgStride; Ncv32f *d_weights; Ncv32u weightsStride; HaarFeature64 *d_Features; HaarClassifierNode128 *d_ClassifierNodes; HaarStage64 *d_Stages; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u startStageInc; Ncv32u endStageExc; Ncv32f scaleArea; //Arguments are passed through the constructor applyHaarClassifierAnchorParallelFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream, Ncv32u *_d_IImg, Ncv32u _IImgStride, Ncv32f *_d_weights, Ncv32u _weightsStride, HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _startStageInc, Ncv32u _endStageExc, Ncv32f _scaleArea) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_IImg(_d_IImg), IImgStride(_IImgStride), d_weights(_d_weights), weightsStride(_weightsStride), d_Features(_d_Features), d_ClassifierNodes(_d_ClassifierNodes), d_Stages(_d_Stages), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), startStageInc(_startStageInc), endStageExc(_endStageExc), scaleArea(_scaleArea) {} template<class TList> void call(TList tl) { (void)tl; applyHaarClassifierAnchorParallel < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value, Loki::TL::TypeAt<TList, 2>::Result::value, Loki::TL::TypeAt<TList, 3>::Result::value, Loki::TL::TypeAt<TList, 4>::Result::value > <<<gridConf, blockConf, 0, cuStream>>> (d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); } }; void applyHaarClassifierAnchorParallelDynTemplate(NcvBool tbInitMaskPositively, NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbReadPixelIndexFromVector, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, cudaStream_t cuStream, Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u 
endStageExc, Ncv32f scaleArea) { applyHaarClassifierAnchorParallelFunctor functor(gridConf, blockConf, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 5, applyHaarClassifierAnchorParallelFunctor> ::call( &functor, tbInitMaskPositively, tbCacheTextureIImg, tbCacheTextureCascade, tbReadPixelIndexFromVector, tbDoAtomicCompaction); } struct applyHaarClassifierClassifierParallelFunctor { dim3 gridConf, blockConf; cudaStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_IImg; Ncv32u IImgStride; Ncv32f *d_weights; Ncv32u weightsStride; HaarFeature64 *d_Features; HaarClassifierNode128 *d_ClassifierNodes; HaarStage64 *d_Stages; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u startStageInc; Ncv32u endStageExc; Ncv32f scaleArea; //Arguments are passed through the constructor applyHaarClassifierClassifierParallelFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream, Ncv32u *_d_IImg, Ncv32u _IImgStride, Ncv32f *_d_weights, Ncv32u _weightsStride, HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _startStageInc, Ncv32u _endStageExc, Ncv32f _scaleArea) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_IImg(_d_IImg), IImgStride(_IImgStride), d_weights(_d_weights), weightsStride(_weightsStride), d_Features(_d_Features), d_ClassifierNodes(_d_ClassifierNodes), d_Stages(_d_Stages), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), startStageInc(_startStageInc), endStageExc(_endStageExc), scaleArea(_scaleArea) {} template<class TList> void call(TList tl) { (void)tl; applyHaarClassifierClassifierParallel < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value, Loki::TL::TypeAt<TList, 2>::Result::value > <<<gridConf, blockConf, 0, cuStream>>> (d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); } }; void applyHaarClassifierClassifierParallelDynTemplate(NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, cudaStream_t cuStream, Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { applyHaarClassifierClassifierParallelFunctor functor(gridConf, blockConf, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 3, applyHaarClassifierClassifierParallelFunctor> ::call( &functor, tbCacheTextureIImg, tbCacheTextureCascade, tbDoAtomicCompaction); } struct initializeMaskVectorFunctor { 
dim3 gridConf, blockConf; cudaStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u step; //Arguments are passed through the constructor initializeMaskVectorFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _step) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), step(_step) {} template<class TList> void call(TList tl) { (void)tl; initializeMaskVector < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value > <<<gridConf, blockConf, 0, cuStream>>> (d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, step); } }; void initializeMaskVectorDynTemplate(NcvBool tbMaskByInmask, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, cudaStream_t cuStream, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u step) { initializeMaskVectorFunctor functor(gridConf, blockConf, cuStream, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, step); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 2, initializeMaskVectorFunctor> ::call( &functor, tbMaskByInmask, tbDoAtomicCompaction); } Ncv32u getStageNumWithNotLessThanNclassifiers(Ncv32u N, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages) { Ncv32u i = 0; for (; i<haar.NumStages; i++) { if (h_HaarStages.ptr()[i].getNumClassifierRootNodes() >= N) { break; } } return i; } NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &integral, NCVMatrix<Ncv32f> &d_weights, NCVMatrixAlloc<Ncv32u> &d_pixelMask, Ncv32u &numDetections, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarStage64> &d_HaarStages, NCVVector<HaarClassifierNode128> &d_HaarNodes, NCVVector<HaarFeature64> &d_HaarFeatures, NcvBool bMaskElements, NcvSize32u anchorsRoi, Ncv32u pixelStep, Ncv32f scaleArea, INCVMemAllocator &gpuAllocator, INCVMemAllocator &cpuAllocator, cudaDeviceProp &devProp, cudaStream_t cuStream) { ncvAssertReturn(integral.memType() == d_weights.memType()&& integral.memType() == d_pixelMask.memType() && integral.memType() == gpuAllocator.memType() && (integral.memType() == NCVMemoryTypeDevice || integral.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() && d_HaarStages.memType() == d_HaarFeatures.memType() && (d_HaarStages.memType() == NCVMemoryTypeDevice || d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED); ncvAssertReturn((integral.ptr() != NULL && d_weights.ptr() != NULL && d_pixelMask.ptr() != NULL && h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL && d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR); ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 && d_pixelMask.width() >= anchorsRoi.width && d_pixelMask.height() >= anchorsRoi.height && d_weights.width() >= anchorsRoi.width && d_weights.height() >= 
anchorsRoi.height && integral.width() >= anchorsRoi.width + haar.ClassifierSize.width && integral.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE); ncvAssertReturn(d_HaarStages.length() >= haar.NumStages && d_HaarNodes.length() >= haar.NumClassifierTotalNodes && d_HaarFeatures.length() >= haar.NumFeatures && d_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false || gpuAllocator.isCounting(), NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); NCV_SET_SKIP_COND(gpuAllocator.isCounting()); #if defined _SELF_TEST_ NCVStatus ncvStat; NCVMatrixAlloc<Ncv32u> h_integralImage(cpuAllocator, integral.width, integral.height, integral.pitch); ncvAssertReturn(h_integralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32f> h_weights(cpuAllocator, d_weights.width, d_weights.height, d_weights.pitch); ncvAssertReturn(h_weights.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> h_pixelMask(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch); ncvAssertReturn(h_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(cpuAllocator, d_HaarNodes.length); ncvAssertReturn(h_HaarNodes.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<HaarFeature64> h_HaarFeatures(cpuAllocator, d_HaarFeatures.length); ncvAssertReturn(h_HaarFeatures.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> h_pixelMask_d(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch); ncvAssertReturn(h_pixelMask_d.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCV_SKIP_COND_BEGIN ncvStat = d_pixelMask.copySolid(h_pixelMask, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = integral.copySolid(h_integralImage, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_weights.copySolid(h_weights, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_HaarNodes.copySolid(h_HaarNodes, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_HaarFeatures.copySolid(h_HaarFeatures, 0); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR); for (Ncv32u i=0; i<(Ncv32u)anchorsRoi.height; i++) { for (Ncv32u j=0; j<d_pixelMask.stride(); j++) { if ((i%pixelStep==0) && (j%pixelStep==0) && (j<(Ncv32u)anchorsRoi.width)) { if (!bMaskElements || h_pixelMask.ptr[i*d_pixelMask.stride()+j] != OBJDET_MASK_ELEMENT_INVALID_32U) { h_pixelMask.ptr[i*d_pixelMask.stride()+j] = (i << 16) | j; } } else { h_pixelMask.ptr[i*d_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U; } } } NCV_SKIP_COND_END #endif NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment(), anchorsRoi.height * d_pixelMask.stride()); ncvAssertReturn(d_vecPixelMask.isMemReused(), NCV_ALLOCATOR_BAD_REUSE); NCVVectorAlloc<Ncv32u> d_vecPixelMaskTmp(gpuAllocator, static_cast<Ncv32u>(d_vecPixelMask.length())); ncvAssertReturn(d_vecPixelMaskTmp.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<Ncv32u> hp_pool32u(cpuAllocator, 2); ncvAssertReturn(hp_pool32u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); Ncv32u *hp_zero = &hp_pool32u.ptr()[0]; Ncv32u *hp_numDet = &hp_pool32u.ptr()[1]; NCV_SKIP_COND_BEGIN *hp_zero = 0; *hp_numDet = 0; NCV_SKIP_COND_END Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) * (haar.ClassifierSize.height - 
2*HAAR_STDDEV_BORDER)); NcvBool bTexCacheCascade = devProp.major < 2; NcvBool bTexCacheIImg = true; //this works better even on Fermi so far NcvBool bDoAtomicCompaction = devProp.major >= 2 || (devProp.major == 1 && devProp.minor >= 3); NCVVector<Ncv32u> *d_ptrNowData = &d_vecPixelMask; NCVVector<Ncv32u> *d_ptrNowTmp = &d_vecPixelMaskTmp; Ncv32u szNppCompactTmpBuf; nppsStCompactGetSize_32u(static_cast<Ncv32u>(d_vecPixelMask.length()), &szNppCompactTmpBuf, devProp); if (bDoAtomicCompaction) { szNppCompactTmpBuf = 0; } NCVVectorAlloc<Ncv8u> d_tmpBufCompact(gpuAllocator, szNppCompactTmpBuf); NCV_SKIP_COND_BEGIN if (bTexCacheIImg) { cudaChannelFormatDesc cfdTexIImage; cfdTexIImage = cudaCreateChannelDesc<Ncv32u>(); size_t alignmentOffset; ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texIImage, integral.ptr(), cfdTexIImage, (anchorsRoi.height + haar.ClassifierSize.height) * integral.pitch()), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); } if (bTexCacheCascade) { cudaChannelFormatDesc cfdTexHaarFeatures; cudaChannelFormatDesc cfdTexHaarClassifierNodes; cfdTexHaarFeatures = cudaCreateChannelDesc<uint2>(); cfdTexHaarClassifierNodes = cudaCreateChannelDesc<uint4>(); size_t alignmentOffset; ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texHaarFeatures, d_HaarFeatures.ptr(), cfdTexHaarFeatures,sizeof(HaarFeature64) * haar.NumFeatures), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texHaarClassifierNodes, d_HaarNodes.ptr(), cfdTexHaarClassifierNodes, sizeof(HaarClassifierNode128) * haar.NumClassifierTotalNodes), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); } Ncv32u stageStartAnchorParallel = 0; Ncv32u stageMiddleSwitch = getStageNumWithNotLessThanNclassifiers(NUM_THREADS_CLASSIFIERPARALLEL, haar, h_HaarStages); Ncv32u stageEndClassifierParallel = haar.NumStages; if (stageMiddleSwitch == 0) { stageMiddleSwitch = 1; } //create stages subdivision for pixel-parallel processing const Ncv32u compactEveryNstage = bDoAtomicCompaction ? 
7 : 1; Ncv32u curStop = stageStartAnchorParallel; std::vector<Ncv32u> pixParallelStageStops; while (curStop < stageMiddleSwitch) { pixParallelStageStops.push_back(curStop); curStop += compactEveryNstage; } if (curStop > compactEveryNstage && curStop - stageMiddleSwitch > compactEveryNstage / 2) { pixParallelStageStops[pixParallelStageStops.size()-1] = (stageMiddleSwitch - (curStop - 2 * compactEveryNstage)) / 2; } pixParallelStageStops.push_back(stageMiddleSwitch); Ncv32u pixParallelStageStopsIndex = 0; if (pixelStep != 1 || bMaskElements) { if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 gridInit((((anchorsRoi.width + pixelStep - 1) / pixelStep + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL), (anchorsRoi.height + pixelStep - 1) / pixelStep); dim3 blockInit(NUM_THREADS_ANCHORSPARALLEL); if (gridInit.x == 0 || gridInit.y == 0) { numDetections = 0; return NCV_SUCCESS; } initializeMaskVectorDynTemplate(bMaskElements, bDoAtomicCompaction, gridInit, blockInit, cuStream, d_ptrNowData->ptr(), d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_pixelMask.stride(), anchorsRoi, pixelStep); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); swap(d_ptrNowData, d_ptrNowTmp); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_ptrNowData->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturn(nppSt == NPPST_SUCCESS, NCV_NPP_ERROR); } numDetections = *hp_numDet; } else { // // 1. Run the first pixel-input pixel-parallel classifier for few stages // if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid1(((d_pixelMask.stride() + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL), anchorsRoi.height); dim3 block1(NUM_THREADS_ANCHORSPARALLEL); applyHaarClassifierAnchorParallelDynTemplate( true, //tbInitMaskPositively bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade pixParallelStageStops[pixParallelStageStopsIndex] != 0,//tbReadPixelIndexFromVector bDoAtomicCompaction, //tbDoAtomicCompaction grid1, block1, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? 
d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), 0, d_pixelMask.stride(), anchorsRoi, pixParallelStageStops[pixParallelStageStopsIndex], pixParallelStageStops[pixParallelStageStopsIndex+1], scaleAreaPixels); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; pixParallelStageStopsIndex++; } // // 2. Run pixel-parallel stages // for (; pixParallelStageStopsIndex < pixParallelStageStops.size()-1; pixParallelStageStopsIndex++) { if (numDetections == 0) { break; } if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid2((numDetections + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL); if (numDetections > MAX_GRID_DIM) { grid2.x = MAX_GRID_DIM; grid2.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM; } dim3 block2(NUM_THREADS_ANCHORSPARALLEL); applyHaarClassifierAnchorParallelDynTemplate( false, //tbInitMaskPositively bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade pixParallelStageStops[pixParallelStageStopsIndex] != 0 || pixelStep != 1 || bMaskElements,//tbReadPixelIndexFromVector bDoAtomicCompaction, //tbDoAtomicCompaction grid2, block2, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), numDetections, d_pixelMask.stride(), anchorsRoi, pixParallelStageStops[pixParallelStageStopsIndex], pixParallelStageStops[pixParallelStageStopsIndex+1], scaleAreaPixels); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections, d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; } // // 3. 
Run all left stages in one stage-parallel kernel // if (numDetections > 0 && stageMiddleSwitch < stageEndClassifierParallel) { if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid3(numDetections); if (numDetections > MAX_GRID_DIM) { grid3.x = MAX_GRID_DIM; grid3.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM; } dim3 block3(NUM_THREADS_CLASSIFIERPARALLEL); applyHaarClassifierClassifierParallelDynTemplate( bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade bDoAtomicCompaction, //tbDoAtomicCompaction grid3, block3, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), numDetections, d_pixelMask.stride(), anchorsRoi, stageMiddleSwitch, stageEndClassifierParallel, scaleAreaPixels); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections, d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; } if (d_ptrNowData != &d_vecPixelMask) { d_vecPixelMaskTmp.copySolid(d_vecPixelMask, cuStream); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } #if defined _SELF_TEST_ ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { std::sort(h_pixelMask_d.ptr, h_pixelMask_d.ptr + numDetections); } Ncv32u fpu_oldcw, fpu_cw; _controlfp_s(&fpu_cw, 0, 0); fpu_oldcw = fpu_cw; _controlfp_s(&fpu_cw, _PC_24, _MCW_PC); Ncv32u numDetGold; ncvStat = ncvApplyHaarClassifierCascade_host(h_integralImage, h_weights, h_pixelMask, numDetGold, haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, bMaskElements, anchorsRoi, pixelStep, scaleArea); ncvAssertReturnNcvStat(ncvStat); _controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC); bool bPass = true; if (numDetGold != numDetections) { printf("NCVHaarClassifierCascade::applyHaarClassifierCascade numdetections don't match: cpu=%d, gpu=%d\n", numDetGold, numDetections); bPass = false; } else { for (Ncv32u i=0; i<std::max(numDetGold, numDetections) && bPass; i++) { if (h_pixelMask.ptr[i] != h_pixelMask_d.ptr[i]) { printf("NCVHaarClassifierCascade::applyHaarClassifierCascade self test failed: i=%d, cpu=%d, gpu=%d\n", i, h_pixelMask.ptr[i], h_pixelMask_d.ptr[i]); bPass = false; } } } printf("NCVHaarClassifierCascade::applyHaarClassifierCascade %s\n", bPass?"PASSED":"FAILED"); #endif NCV_SKIP_COND_END return NCV_SUCCESS; } //============================================================================== // // HypothesesOperations file // //============================================================================== const Ncv32u NUM_GROW_THREADS = 128; __device__ __host__ NcvRect32u pixelToRect(Ncv32u pixel, Ncv32u 
width, Ncv32u height, Ncv32f scale) { NcvRect32u res; res.x = (Ncv32u)(scale * (pixel & 0xFFFF)); res.y = (Ncv32u)(scale * (pixel >> 16)); res.width = (Ncv32u)(scale * width); res.height = (Ncv32u)(scale * height); return res; } __global__ void growDetectionsKernel(Ncv32u *pixelMask, Ncv32u numElements, NcvRect32u *hypotheses, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddr = blockId * NUM_GROW_THREADS + threadIdx.x; if (elemAddr >= numElements) { return; } hypotheses[elemAddr] = pixelToRect(pixelMask[elemAddr], rectWidth, rectHeight, curScale); } NCVStatus ncvGrowDetectionsVector_device(NCVVector<Ncv32u> &pixelMask, Ncv32u numPixelMaskDetections, NCVVector<NcvRect32u> &hypotheses, Ncv32u &totalDetections, Ncv32u totalMaxDetections, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale, cudaStream_t cuStream) { ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(pixelMask.memType() == hypotheses.memType() && pixelMask.memType() == NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI); ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE); ncvAssertReturn(totalMaxDetections <= hypotheses.length() && numPixelMaskDetections <= pixelMask.length() && totalMaxDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT); NCVStatus ncvStat = NCV_SUCCESS; Ncv32u numDetsToCopy = numPixelMaskDetections; if (numDetsToCopy == 0) { return ncvStat; } if (totalDetections + numPixelMaskDetections > totalMaxDetections) { ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; numDetsToCopy = totalMaxDetections - totalDetections; } dim3 block(NUM_GROW_THREADS); dim3 grid((numDetsToCopy + NUM_GROW_THREADS - 1) / NUM_GROW_THREADS); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } growDetectionsKernel<<<grid, block, 0, cuStream>>>(pixelMask.ptr(), numDetsToCopy, hypotheses.ptr() + totalDetections, rectWidth, rectHeight, curScale); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); totalDetections += numDetsToCopy; return ncvStat; } //============================================================================== // // Pipeline file // //============================================================================== NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg, NcvSize32u srcRoi, NCVVector<NcvRect32u> &d_dstRects, Ncv32u &dstNumRects, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarStage64> &d_HaarStages, NCVVector<HaarClassifierNode128> &d_HaarNodes, NCVVector<HaarFeature64> &d_HaarFeatures, NcvSize32u minObjSize, Ncv32u minNeighbors, //default 4 Ncv32f scaleStep, //default 1.2f Ncv32u pixelStep, //default 1 Ncv32u flags, //default NCVPipeObjDet_Default INCVMemAllocator &gpuAllocator, INCVMemAllocator &cpuAllocator, cudaDeviceProp &devProp, cudaStream_t cuStream) { ncvAssertReturn(d_srcImg.memType() == d_dstRects.memType() && d_srcImg.memType() == gpuAllocator.memType() && (d_srcImg.memType() == NCVMemoryTypeDevice || d_srcImg.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() && d_HaarStages.memType() == d_HaarFeatures.memType() && (d_HaarStages.memType() == NCVMemoryTypeDevice || d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); 
ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED); ncvAssertReturn((d_srcImg.ptr() != NULL && d_dstRects.ptr() != NULL && h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL && d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0 && d_srcImg.width() >= srcRoi.width && d_srcImg.height() >= srcRoi.height && srcRoi.width >= minObjSize.width && srcRoi.height >= minObjSize.height && d_dstRects.length() >= 1, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleStep > 1.0f, NCV_INVALID_SCALE); ncvAssertReturn(d_HaarStages.length() >= haar.NumStages && d_HaarNodes.length() >= haar.NumClassifierTotalNodes && d_HaarFeatures.length() >= haar.NumFeatures && d_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); //TODO: set NPP active stream to cuStream NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); Ncv32u integralWidth = d_srcImg.width() + 1; Ncv32u integralHeight = d_srcImg.height() + 1; NCVMatrixAlloc<Ncv32u> integral(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(integral.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32f> d_rectStdDev(gpuAllocator, d_srcImg.width(), d_srcImg.height()); ncvAssertReturn(d_rectStdDev.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> d_pixelMask(gpuAllocator, d_srcImg.width(), d_srcImg.height()); ncvAssertReturn(d_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> d_scaledIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_scaledIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv64u> d_scaledSqIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_scaledSqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<NcvRect32u> d_hypothesesIntermediate(gpuAllocator, d_srcImg.width() * d_srcImg.height()); ncvAssertReturn(d_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<NcvRect32u> h_hypothesesIntermediate(cpuAllocator, d_srcImg.width() * d_srcImg.height()); ncvAssertReturn(h_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVStatus nppStat; Ncv32u szTmpBufIntegral, szTmpBufSqIntegral; nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufIntegral, devProp); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufSqIntegral, devProp); ncvAssertReturnNcvStat(nppStat); NCVVectorAlloc<Ncv8u> d_tmpIIbuf(gpuAllocator, std::max(szTmpBufIntegral, szTmpBufSqIntegral)); ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCV_SKIP_COND_BEGIN nppStat = nppiStIntegral_8u32u_C1R(d_srcImg.ptr(), d_srcImg.pitch(), integral.ptr(), integral.pitch(), NcvSize32u(d_srcImg.width(), d_srcImg.height()), d_tmpIIbuf.ptr(), szTmpBufIntegral, devProp); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStSqrIntegral_8u64u_C1R(d_srcImg.ptr(), d_srcImg.pitch(), 
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(), NcvSize32u(d_srcImg.width(), d_srcImg.height()), d_tmpIIbuf.ptr(), szTmpBufSqIntegral, devProp); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_END dstNumRects = 0; Ncv32u lastCheckedScale = 0; NcvBool bReverseTraverseScale = ((flags & NCVPipeObjDet_FindLargestObject) != 0); std::vector<Ncv32u> scalesVector; NcvBool bFoundLargestFace = false; for (Ncv32f scaleIter = 1.0f; ; scaleIter *= scaleStep) { Ncv32u scale = (Ncv32u)scaleIter; if (lastCheckedScale == scale) { continue; } lastCheckedScale = scale; if (haar.ClassifierSize.width * (Ncv32s)scale < minObjSize.width || haar.ClassifierSize.height * (Ncv32s)scale < minObjSize.height) { continue; } NcvSize32s srcRoi_, srcIIRo_i, scaledIIRoi, searchRoi; srcRoi_.width = d_srcImg.width(); srcRoi_.height = d_srcImg.height(); srcIIRo_i.width = srcRoi_.width + 1; srcIIRo_i.height = srcRoi_.height + 1; scaledIIRoi.width = srcIIRo_i.width / scale; scaledIIRoi.height = srcIIRo_i.height / scale; searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width; searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height; if (searchRoi.width <= 0 || searchRoi.height <= 0) { break; } scalesVector.push_back(scale); if (gpuAllocator.isCounting()) { break; } } if (bReverseTraverseScale) { std::reverse(scalesVector.begin(), scalesVector.end()); } //TODO: handle _fair_scale_ flag for (Ncv32u i=0; i<scalesVector.size(); i++) { Ncv32u scale = scalesVector[i]; NcvSize32u srcRoi_, scaledIIRoi, searchRoi; NcvSize32u srcIIRoi; srcRoi_.width = d_srcImg.width(); srcRoi_.height = d_srcImg.height(); srcIIRoi.width = srcRoi_.width + 1; srcIIRoi.height = srcRoi_.height + 1; scaledIIRoi.width = srcIIRoi.width / scale; scaledIIRoi.height = srcIIRoi.height / scale; searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width; searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height; NCV_SKIP_COND_BEGIN nppStat = nppiStDecimate_32u_C1R( integral.ptr(), integral.pitch(), d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(), srcIIRoi, scale, true); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStDecimate_64u_C1R( d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(), d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(), srcIIRoi, scale, true); ncvAssertReturnNcvStat(nppStat); const NcvRect32u rect( HAAR_STDDEV_BORDER, HAAR_STDDEV_BORDER, haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER, haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER); nppStat = nppiStRectStdDev_32f_C1R( d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(), d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(), d_rectStdDev.ptr(), d_rectStdDev.pitch(), NcvSize32u(searchRoi.width, searchRoi.height), rect, (Ncv32f)scale*scale, true); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_END Ncv32u detectionsOnThisScale; ncvStat = ncvApplyHaarClassifierCascade_device( d_scaledIntegralImage, d_rectStdDev, d_pixelMask, detectionsOnThisScale, haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false, searchRoi, pixelStep, (Ncv32f)scale*scale, gpuAllocator, cpuAllocator, devProp, cuStream); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_BEGIN NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment()); ncvStat = ncvGrowDetectionsVector_device( d_vecPixelMask, detectionsOnThisScale, d_hypothesesIntermediate, dstNumRects, static_cast<Ncv32u>(d_hypothesesIntermediate.length()), haar.ClassifierSize.width, haar.ClassifierSize.height, (Ncv32f)scale, cuStream); ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat); if 
(flags & NCVPipeObjDet_FindLargestObject) { if (dstNumRects == 0) { continue; } if (dstNumRects != 0) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } Ncv32u numStrongHypothesesNow = dstNumRects; ncvStat = ncvGroupRectangles_host( h_hypothesesIntermediate, numStrongHypothesesNow, minNeighbors, RECT_SIMILARITY_PROPORTION, NULL); ncvAssertReturnNcvStat(ncvStat); if (numStrongHypothesesNow > 0) { NcvRect32u maxRect = h_hypothesesIntermediate.ptr()[0]; for (Ncv32u j=1; j<numStrongHypothesesNow; j++) { if (maxRect.width < h_hypothesesIntermediate.ptr()[j].width) { maxRect = h_hypothesesIntermediate.ptr()[j]; } } h_hypothesesIntermediate.ptr()[0] = maxRect; dstNumRects = 1; ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); bFoundLargestFace = true; break; } } NCV_SKIP_COND_END if (gpuAllocator.isCounting()) { break; } } NCVStatus ncvRetCode = NCV_SUCCESS; NCV_SKIP_COND_BEGIN if (flags & NCVPipeObjDet_FindLargestObject) { if (!bFoundLargestFace) { dstNumRects = 0; } } else { //TODO: move hypotheses filtration to GPU pipeline (the only CPU-resident element of the pipeline left) if (dstNumRects != 0) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } ncvStat = ncvGroupRectangles_host( h_hypothesesIntermediate, dstNumRects, minNeighbors, RECT_SIMILARITY_PROPORTION, NULL); ncvAssertReturnNcvStat(ncvStat); if (dstNumRects > d_dstRects.length()) { ncvRetCode = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; dstNumRects = static_cast<Ncv32u>(d_dstRects.length()); } if (dstNumRects != 0) { ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); } } if (flags & NCVPipeObjDet_VisualizeInPlace) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvDrawRects_8u_device(d_srcImg.ptr(), d_srcImg.stride(), d_srcImg.width(), d_srcImg.height(), d_dstRects.ptr(), dstNumRects, 255, cuStream); } NCV_SKIP_COND_END return ncvRetCode; } //============================================================================== // // Purely Host code: classifier IO, mock-ups // //============================================================================== #ifdef _SELF_TEST_ #include <float.h> #endif NCVStatus ncvApplyHaarClassifierCascade_host(NCVMatrix<Ncv32u> &h_integralImage, NCVMatrix<Ncv32f> &h_weights, NCVMatrixAlloc<Ncv32u> &h_pixelMask, Ncv32u &numDetections, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures, NcvBool bMaskElements, NcvSize32u anchorsRoi, Ncv32u pixelStep, Ncv32f scaleArea) { ncvAssertReturn(h_integralImage.memType() == h_weights.memType() && h_integralImage.memType() == h_pixelMask.memType() && (h_integralImage.memType() == NCVMemoryTypeHostPageable || h_integralImage.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() == h_HaarNodes.memType() && h_HaarStages.memType() == h_HaarFeatures.memType() && 
(h_HaarStages.memType() == NCVMemoryTypeHostPageable || h_HaarStages.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_integralImage.ptr() != NULL && h_weights.ptr() != NULL && h_pixelMask.ptr() != NULL && h_HaarStages.ptr() != NULL && h_HaarNodes.ptr() != NULL && h_HaarFeatures.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 && h_pixelMask.width() >= anchorsRoi.width && h_pixelMask.height() >= anchorsRoi.height && h_weights.width() >= anchorsRoi.width && h_weights.height() >= anchorsRoi.height && h_integralImage.width() >= anchorsRoi.width + haar.ClassifierSize.width && h_integralImage.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE); ncvAssertReturn(h_HaarStages.length() >= haar.NumStages && h_HaarNodes.length() >= haar.NumClassifierTotalNodes && h_HaarFeatures.length() >= haar.NumFeatures && h_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) * (haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER)); for (Ncv32u i=0; i<anchorsRoi.height; i++) { for (Ncv32u j=0; j<h_pixelMask.stride(); j++) { if (i % pixelStep != 0 || j % pixelStep != 0 || j >= anchorsRoi.width) { h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U; } else { for (Ncv32u iStage = 0; iStage < haar.NumStages; iStage++) { Ncv32f curStageSum = 0.0f; Ncv32u numRootNodesInStage = h_HaarStages.ptr()[iStage].getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = h_HaarStages.ptr()[iStage].getStartClassifierRootNodeOffset(); if (iStage == 0) { if (bMaskElements && h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } else { h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = ((i << 16) | j); } } else if (h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } while (numRootNodesInStage--) { NcvBool bMoreNodesToTraverse = true; Ncv32u curNodeOffset = curRootNodeOffset; while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = h_HaarNodes.ptr()[curNodeOffset]; HaarFeatureDescriptor32 curFeatDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = curFeatDesc.getNumFeatures(); Ncv32u curNodeFeaturesOffs = curFeatDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.f; for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { HaarFeature64 feature = h_HaarFeatures.ptr()[curNodeFeaturesOffs + iRect]; Ncv32u rectX, rectY, rectWidth, rectHeight; feature.getRect(&rectX, &rectY, &rectWidth, &rectHeight); Ncv32f rectWeight = feature.getWeight(); Ncv32u iioffsTL = (i + rectY) * h_integralImage.stride() + (j + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * h_integralImage.stride(); Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u iivalTL = h_integralImage.ptr()[iioffsTL]; Ncv32u iivalTR = h_integralImage.ptr()[iioffsTR]; Ncv32u iivalBL = h_integralImage.ptr()[iioffsBL]; Ncv32u iivalBR = h_integralImage.ptr()[iioffsBR]; Ncv32u rectSum = iivalBR - iivalBL + iivalTL - iivalTR; curNodeVal += (Ncv32f)rectSum * rectWeight; } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); 
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleAreaPixels * h_weights.ptr()[i * h_weights.stride() + j] * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = curFeatDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = curFeatDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValueHost(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { curNodeOffset = nextNodeDescriptor.getNextNodeOffset(); } } curRootNodeOffset++; } Ncv32f tmpStageThreshold = h_HaarStages.ptr()[iStage].getStageThreshold(); if (curStageSum < tmpStageThreshold) { //drop h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U; break; } } } } } std::sort(h_pixelMask.ptr(), h_pixelMask.ptr() + anchorsRoi.height * h_pixelMask.stride()); Ncv32u i = 0; for (; i<anchorsRoi.height * h_pixelMask.stride(); i++) { if (h_pixelMask.ptr()[i] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } } numDetections = i; return NCV_SUCCESS; } NCVStatus ncvGrowDetectionsVector_host(NCVVector<Ncv32u> &pixelMask, Ncv32u numPixelMaskDetections, NCVVector<NcvRect32u> &hypotheses, Ncv32u &totalDetections, Ncv32u totalMaxDetections, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale) { ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(pixelMask.memType() == hypotheses.memType() && pixelMask.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI); ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE); ncvAssertReturn(totalMaxDetections <= hypotheses.length() && numPixelMaskDetections <= pixelMask.length() && totalMaxDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT); NCVStatus ncvStat = NCV_SUCCESS; Ncv32u numDetsToCopy = numPixelMaskDetections; if (numDetsToCopy == 0) { return ncvStat; } if (totalDetections + numPixelMaskDetections > totalMaxDetections) { ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; numDetsToCopy = totalMaxDetections - totalDetections; } for (Ncv32u i=0; i<numDetsToCopy; i++) { hypotheses.ptr()[totalDetections + i] = pixelToRect(pixelMask.ptr()[i], rectWidth, rectHeight, curScale); } totalDetections += numDetsToCopy; return ncvStat; } NCVStatus loadFromXML(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, std::vector<HaarStage64> &haarStages, std::vector<HaarClassifierNode128> &haarClassifierNodes, std::vector<HaarFeature64> &haarFeatures); #define NVBIN_HAAR_SIZERESERVED 16 #define NVBIN_HAAR_VERSION 0x1 static NCVStatus loadFromNVBIN(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, std::vector<HaarStage64> &haarStages, std::vector<HaarClassifierNode128> &haarClassifierNodes, std::vector<HaarFeature64> &haarFeatures) { size_t readCount; FILE *fp = fopen(filename.c_str(), "rb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); Ncv32u fileVersion; readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR); Ncv32u fsize; readCount = fread(&fsize, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fseek(fp, 0, SEEK_END); Ncv32u fsizeActual = ftell(fp); ncvAssertReturn(fsize == fsizeActual, NCV_FILE_ERROR); std::vector<unsigned char> fdata; 
fdata.resize(fsize); Ncv32u dataOffset = 0; fseek(fp, 0, SEEK_SET); readCount = fread(&fdata[0], fsize, 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fclose(fp); //data dataOffset = NVBIN_HAAR_SIZERESERVED; haar.NumStages = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumClassifierRootNodes = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumClassifierTotalNodes = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumFeatures = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.ClassifierSize = *(NcvSize32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvSize32u); haar.bNeedsTiltedII = *(NcvBool *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvBool); haar.bHasStumpsOnly = *(NcvBool *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvBool); haarStages.resize(haar.NumStages); haarClassifierNodes.resize(haar.NumClassifierTotalNodes); haarFeatures.resize(haar.NumFeatures); Ncv32u szStages = haar.NumStages * sizeof(HaarStage64); Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128); Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64); memcpy(&haarStages[0], &fdata[0]+dataOffset, szStages); dataOffset += szStages; memcpy(&haarClassifierNodes[0], &fdata[0]+dataOffset, szClassifiers); dataOffset += szClassifiers; memcpy(&haarFeatures[0], &fdata[0]+dataOffset, szFeatures); dataOffset += szFeatures; return NCV_SUCCESS; } NCVStatus ncvHaarGetClassifierSize(const cv::String &filename, Ncv32u &numStages, Ncv32u &numNodes, Ncv32u &numFeatures) { size_t readCount; NCVStatus ncvStat; cv::String fext = filename.substr(filename.find_last_of(".") + 1); fext = fext.toLowerCase(); if (fext == "nvbin") { FILE *fp = fopen(filename.c_str(), "rb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); Ncv32u fileVersion; readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR); fseek(fp, NVBIN_HAAR_SIZERESERVED, SEEK_SET); Ncv32u tmp; readCount = fread(&numStages, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&tmp, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&numNodes, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&numFeatures, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fclose(fp); } else if (fext == "xml") { HaarClassifierCascadeDescriptor haar; std::vector<HaarStage64> haarStages; std::vector<HaarClassifierNode128> haarNodes; std::vector<HaarFeature64> haarFeatures; ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); numStages = haar.NumStages; numNodes = haar.NumClassifierTotalNodes; numFeatures = haar.NumFeatures; } else { return NCV_HAAR_XML_LOADING_EXCEPTION; } return NCV_SUCCESS; } NCVStatus ncvHaarLoadFromFile_host(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures) { ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned && h_HaarNodes.memType() == NCVMemoryTypeHostPinned && h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR); NCVStatus ncvStat; cv::String fext = filename.substr(filename.find_last_of(".") + 1); fext = fext.toLowerCase(); std::vector<HaarStage64> 
haarStages; std::vector<HaarClassifierNode128> haarNodes; std::vector<HaarFeature64> haarFeatures; if (fext == "nvbin") { ncvStat = loadFromNVBIN(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); } else if (fext == "xml") { ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); } else { return NCV_HAAR_XML_LOADING_EXCEPTION; } ncvAssertReturn(h_HaarStages.length() >= haarStages.size(), NCV_MEM_INSUFFICIENT_CAPACITY); ncvAssertReturn(h_HaarNodes.length() >= haarNodes.size(), NCV_MEM_INSUFFICIENT_CAPACITY); ncvAssertReturn(h_HaarFeatures.length() >= haarFeatures.size(), NCV_MEM_INSUFFICIENT_CAPACITY); memcpy(h_HaarStages.ptr(), &haarStages[0], haarStages.size()*sizeof(HaarStage64)); memcpy(h_HaarNodes.ptr(), &haarNodes[0], haarNodes.size()*sizeof(HaarClassifierNode128)); memcpy(h_HaarFeatures.ptr(), &haarFeatures[0], haarFeatures.size()*sizeof(HaarFeature64)); return NCV_SUCCESS; } NCVStatus ncvHaarStoreNVBIN_host(const cv::String &filename, HaarClassifierCascadeDescriptor haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures) { ncvAssertReturn(h_HaarStages.length() >= haar.NumStages, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarNodes.length() >= haar.NumClassifierTotalNodes, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarFeatures.length() >= haar.NumFeatures, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned && h_HaarNodes.memType() == NCVMemoryTypeHostPinned && h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR); Ncv32u szStages = haar.NumStages * sizeof(HaarStage64); Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128); Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64); Ncv32u dataOffset = 0; std::vector<unsigned char> fdata; fdata.resize(szStages+szClassifiers+szFeatures+1024, 0); //header *(Ncv32u *)(&fdata[0]+dataOffset) = NVBIN_HAAR_VERSION; //data dataOffset = NVBIN_HAAR_SIZERESERVED; *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumStages; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierRootNodes; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierTotalNodes; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumFeatures; dataOffset += sizeof(Ncv32u); *(NcvSize32u *)(&fdata[0]+dataOffset) = haar.ClassifierSize; dataOffset += sizeof(NcvSize32u); *(NcvBool *)(&fdata[0]+dataOffset) = haar.bNeedsTiltedII; dataOffset += sizeof(NcvBool); *(NcvBool *)(&fdata[0]+dataOffset) = haar.bHasStumpsOnly; dataOffset += sizeof(NcvBool); memcpy(&fdata[0]+dataOffset, h_HaarStages.ptr(), szStages); dataOffset += szStages; memcpy(&fdata[0]+dataOffset, h_HaarNodes.ptr(), szClassifiers); dataOffset += szClassifiers; memcpy(&fdata[0]+dataOffset, h_HaarFeatures.ptr(), szFeatures); dataOffset += szFeatures; Ncv32u fsize = dataOffset; //TODO: CRC32 here //update header dataOffset = sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = fsize; FILE *fp = fopen(filename.c_str(), "wb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); fwrite(&fdata[0], fsize, 1, fp); fclose(fp); return NCV_SUCCESS; } #endif /* CUDA_DISABLER */
ca305c81aa16e754f10bd0d0fa1891c1c457911f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void NesterovUpdate(int N, Dtype *g, Dtype *h,
    Dtype momentum, Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float hi = h[i];
    float hi_new = h[i] = momentum * hi + local_rate * g[i];
    g[i] = (1 + momentum) * hi_new - momentum * hi;
  }
}

template <typename Dtype>
void nesterov_update_gpu(int N, Dtype *g, Dtype *h, Dtype momentum,
    Dtype local_rate) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(HIP_KERNEL_NAME(NesterovUpdate<Dtype>),
      dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, g, h, momentum, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void nesterov_update_gpu<float>(int, float *, float *, float, float);
template void nesterov_update_gpu<double>(int, double *, double *,
    double, double);

}  // namespace caffe
ca305c81aa16e754f10bd0d0fa1891c1c457911f.cu
#include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void NesterovUpdate(int N, Dtype *g, Dtype *h, Dtype momentum, Dtype local_rate) { CUDA_KERNEL_LOOP(i, N) { float hi = h[i]; float hi_new = h[i] = momentum * hi + local_rate * g[i]; g[i] = (1 + momentum) * hi_new - momentum * hi; } } template <typename Dtype> void nesterov_update_gpu(int N, Dtype *g, Dtype *h, Dtype momentum, Dtype local_rate) { NesterovUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, g, h, momentum, local_rate); CUDA_POST_KERNEL_CHECK; } template void nesterov_update_gpu<float>(int, float *, float *, float, float); template void nesterov_update_gpu<double>(int, double *, double *, double, double); } // namespace caffe
71ee3917ba21d9478cb34eea98e8c8164cc173f9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "relu.hh"

//-----------------------------------------------------------------------------
// RELU ACTIVATION
//-----------------------------------------------------------------------------

__global__ void activationReLUCUDA(float *A, float *B, float *C, int M, int N)
{
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int x = blockIdx.x * blockDim.x + threadIdx.x;

    float tmp = A[y * N + x];
    B[y * N + x] = tmp > 0 ? tmp : 0;
    C[y * N + x] = tmp > 0 ? 1 : 0;
}

void ReLU::activation(Matrix &input, Matrix &output, Matrix &output_prime)
{
    int cols = BLOCK_ROUND_UP(input.getCols());
    int rows = BLOCK_ROUND_UP(input.getRows());

    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(cols / dimBlock.x, rows / dimBlock.y);

    hipLaunchKernelGGL(activationReLUCUDA, dim3(dimGrid), dim3(dimBlock), 0, 0,
                       input.buff, output.buff, output_prime.buff, rows, cols);
}
71ee3917ba21d9478cb34eea98e8c8164cc173f9.cu
#include "relu.hh" //----------------------------------------------------------------------------- // RELU ACTIVATION //----------------------------------------------------------------------------- __global__ void activationReLUCUDA(float *A, float *B, float *C, int M, int N) { int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; float tmp = A[y * N + x]; B[y * N + x] = tmp > 0 ? tmp : 0; C[y * N + x] = tmp > 0 ? 1 : 0; } void ReLU::activation(Matrix &input, Matrix &output, Matrix &output_prime) { int cols = BLOCK_ROUND_UP(input.getCols()); int rows = BLOCK_ROUND_UP(input.getRows()); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(cols / dimBlock.x, rows / dimBlock.y); activationReLUCUDA<<<dimGrid, dimBlock>>> (input.buff, output.buff, output_prime.buff, rows, cols); }
f66cc0470ea7506b54109ee711c2f1fefde86c22.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include "testlayers.h" #include <array/NDArray.h> #include <array/NDArrayFactory.h> #include <graph/Context.h> #include <graph/Node.h> #include <graph/Variable.h> #include <graph/VariableSpace.h> #include <ops/specials_cuda.h> #include <helpers/TAD.h> #include <helpers/MmulHelper.h> #include <hip/hip_runtime.h> using namespace sd; using namespace sd::graph; class CudaBasicsTests2 : public testing::Test { public: }; TEST_F(CudaBasicsTests2, test_devices_1) { auto caps = Environment::getInstance().capabilities(); ASSERT_FALSE(caps.empty()); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_1) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printIndexedBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_2) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('f', {M,N}, sd::DataType::DOUBLE); NDArray exp('f', {M,N}, {-1.6, -0.7, 0.2, -0.8, 0.1, 1., -0., 0.9, 1.8, 0.8, 1.7, 2.6, 1.6, 2.5, 3.4}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_3) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('f', {M,N}, sd::DataType::DOUBLE); NDArray exp('f', {M,N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_4) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, 
{1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {0.1, 2.5, 4.9, 7.3, 9.7,0.3, 2.7, 5.1, 7.5, 9.9,0.5, 2.9, 5.3, 7.7, 10.1}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); // NDArray* pA = a.permute({1,0}); // NDArray* pB = b.permute({1,0}); // NDArray* pC = c.permute({1,0}); // sd::MmulHelper::mmul(pB, pA, pC, 1., 0.); // ASSERT_TRUE(c.equalsTo(&exp)); // delete pA; // delete pB; // delete pC; } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_5) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('f', {M,N}, sd::DataType::DOUBLE); NDArray exp('f', {M,N}, {-8.8, -4.3, 0.2, 8.6, 4.1, -0.4, -8.4, -3.9, 0.6, 8.2, 3.7, -0.8, -8.0, -3.5, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_6) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-1.6, -0.8, -0.0, 0.8, 1.6, -0.7, 0.1, 0.9, 1.7, 2.5, 0.2, 1.0, 1.8, 2.6, 3.4}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_7) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-1.9, 1.3, -0.7, 0.1, 0.5, -0.9, 0.3, 0.3, -0.9, 1.5, 0.1, -0.7, 1.3, -1.9, 2.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_8) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_9) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, 
sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::FLOAT32); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_10) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printIndexedBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_11) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_12) { int devCnt = 0; hipGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5) return; const Nd4jLong M = 4; const Nd4jLong K = 4; const Nd4jLong N = 4; NDArray a('f', {M,K}, {1.,2,3,4,5,6,7,8,9,2,3,2,1,0,4,7.}, sd::DataType::INT8); NDArray b('f', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-1,2,-2,3,-4,5,-6.}, sd::DataType::INT8); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {-16., -22., -23., -25., 30., -12., -38., -70., 20., 16., 18., 18., 22., -8., -28., -52.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_13) { int devCnt = 0; hipGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::INT8); NDArray b('c', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::INT8); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {-109., -122., -135., 111., 120., 129., -121., -134., -147., 129., 144., 159., -130., -140., -150.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_14) { int devCnt = 0; hipGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::INT8); NDArray b('c', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::INT8); NDArray c('c', {M,N}, 
sd::DataType::FLOAT32); NDArray exp('c', {M,N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_15) { int devCnt = 0; hipGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_16) { int devCnt = 0; hipGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_17) { int devCnt = 0; hipGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('c', {M,N}, sd::DataType::FLOAT32); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_18) { int devCnt = 0; hipGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5.3) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('f', {M,N}, sd::DataType::HALF); NDArray exp('f', {M,N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 1e-1)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_19) { int devCnt = 0; hipGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5.3) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray 
a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('f', {M,N}, sd::DataType::HALF); NDArray exp('f', {M,N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 1e-1)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_20) { int devCnt = 0; hipGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5.3) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('c', {M,N}, sd::DataType::HALF); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 1e-1)); } /* ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_21) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::INT8); NDArray b('c', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_22) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::FLOAT32); NDArray b('c', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('c', {M,N}, sd::DataType::FLOAT32); NDArray exp('c', {M,N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_23) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_24) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, 
{-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_25) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::HALF); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_26) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; // 3x4 * 4x5 = 3x5 NDArray a('c', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::INT64); NDArray b('c', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_27) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_28) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {-1.6, -0.7, 0.2, -0.8, 0.1, 1., -0., 0.9, 1.8, 0.8, 1.7, 2.6, 1.6, 2.5, 3.4}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } */ ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_1) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray x('f', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {0.1, 0.3, 0.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_2) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray x('f', {N}, {1,-2,3,-4}, 
sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-1.6, -0.7, 0.2}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_3) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray x('c', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-1.6, -0.7, 0.2}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_4) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray x('c', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {-1.6, -0.7, 0.2}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_5) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray x('c', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {0.1, 0.3, 0.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_6) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray temp('f', {M,N,5}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {5.5, 5.1, 4.7}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_7) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {M,N,5}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {5.1, 3.3, 1.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_8) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {N,M,5}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(4, {1,2}); NDArray y('f', {M}, 
sd::DataType::DOUBLE); NDArray exp('f', {M}, {6.2, 4.5, 1.7}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_9) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(3, {0,1}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {1.5, 1.8, 1.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_10) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0,1}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_11) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('c', {5,N,M}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(13, {0,2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-12.1, -10.9, -9.7}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_12) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('c', {5,N,M}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(10, {0,2}); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {3.3, 3.3, 3.3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_13) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0,1}, true); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); 
ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_14) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('c', {5,N,M}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(10, {0,2}, true); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {3.3, 3.3, 3.3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_15) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0,1}); NDArray y = temp(17, {0,2}); NDArray exp('f', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_16) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray temp1('c', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0,1}); NDArray y = temp1(17, {0,2}); NDArray exp('c', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_17) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0,1}); NDArray y = temp(17, {0,2}, true); // y.printShapeInfo(); NDArray exp('f', {1,M,1}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_18) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray temp1('c', {5,M,N}, 
{16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0,1},true); NDArray y = temp1(17, {0,2},true); NDArray exp('c', {1,M,1}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// /* TEST_F(CudaBasicsTests2, mmulMxV_19) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray x('f', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {0.1, 0.3, 0.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_20) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray x('f', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {-1.6, -0.7, 0.2}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_21) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray x('c', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('c', {M}, sd::DataType::FLOAT32); NDArray exp('c', {M}, {-1.6, -0.7, 0.2}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_22) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray temp('f', {M,N,5}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2}); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {5.5, 5.1, 4.7}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_23) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(3, {0,1}); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {1.5, 1.8, 1.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_24) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray temp('f', {M,N,5}, 
{16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2},true); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {5.5, 5.1, 4.7}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_25) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(3, {0,1}, true); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {1.5, 1.8, 1.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_26) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray temp1('c', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::FLOAT32); NDArray x = temp(2, {0,1}); NDArray y = temp1(17, {0,2}); NDArray exp('c', {M}, {-0.3, 0.3, 0.9}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_27) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray temp1('c', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::FLOAT32); NDArray x = temp(2, {0,1},true); NDArray y = temp1(17, {0,2},true); NDArray exp('c', {1,M,1}, {-0.3, 0.3, 0.9}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_28) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray temp('f', {M,N,5}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2}); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {5.1, 3.3, 1.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } 
////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulDot_1) { const Nd4jLong N = 4; NDArray x('c', {N}, {1, 2, 3, 4}, sd::DataType::INT32); NDArray y('f', {N}, {0.1, 0.2, 0.3, 0.4}, sd::DataType::FLOAT32); NDArray z(sd::DataType::DOUBLE); NDArray exp('c', {}, {3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&x, &y, &z); ASSERT_TRUE(z.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulDot_2) { const Nd4jLong N = 4; NDArray x('c', {1,1,N}, {1,2, 3, 4}, sd::DataType::INT32); NDArray y('f', {1,1,N,1,1,1}, {0.1, 0.2, 0.3, 0.4}, sd::DataType::FLOAT32); NDArray z(sd::DataType::DOUBLE); NDArray exp('c', {}, {3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&x, &y, &z); ASSERT_TRUE(z.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulDot_3) { const Nd4jLong N = 4; NDArray xBig('c', {4,2}, {1, 0, 2, 0, 3, 0, 4, 0}, sd::DataType::INT32); NDArray yBig('c', {4,3}, {0.1, 0, 0, 0.2, 0, 0, 0.3, 0, 0, 0.4, 0,0}, sd::DataType::FLOAT32); NDArray x = xBig(0, {1}, true); NDArray y = yBig(0, {1}, true); NDArray z(sd::DataType::DOUBLE); NDArray exp('c', {}, {3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&x, &y, &z); ASSERT_TRUE(z.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulDot_4) { const Nd4jLong N = 4; NDArray xBig('f', {4,2}, {1, 2, 3, 4, 0, 0, 0, 0}, sd::DataType::INT32); NDArray yBig('c', {4,3}, {0.1, 0, 0, 0.2, 0, 0, 0.3, 0, 0, 0.4, 0,0}, sd::DataType::FLOAT32); NDArray x = xBig(0, {1}, true); NDArray y = yBig(0, {1}); NDArray z(sd::DataType::DOUBLE); NDArray exp('c', {}, {3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&x, &y, &z); ASSERT_TRUE(z.equalsTo(&exp)); } */
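In the tests above, the INT8 and FP16 GEMM cases (mmulMxM_12 through mmulMxM_20) return early when the current GPU's compute capability is below 5.0, or below 5.3 for half-precision outputs: hipGetDevice (cudaGetDevice in the original file below) fills in the current device ordinal, which is then used to index Environment::getInstance().capabilities(). The following is a rough, standalone sketch of the same gate written against the plain CUDA runtime API instead of the library's Environment helper; the function name and program structure are illustrative assumptions, not part of this record.

// Illustrative sketch (assumption): capability gate via the CUDA runtime API.
// hipify maps these calls 1:1 to hipGetDevice / hipGetDeviceProperties / hipDeviceProp_t.
#include <cuda_runtime.h>
#include <cstdio>

static bool deviceSupportsAtLeast(int requiredMajor, int requiredMinor) {
    int dev = 0;
    if (cudaGetDevice(&dev) != cudaSuccess)               // current device ordinal (not a device count)
        return false;
    cudaDeviceProp prop{};
    if (cudaGetDeviceProperties(&prop, dev) != cudaSuccess)
        return false;
    // {5,0} gates the INT8 paths, {5,3} the half-precision-output paths in the tests above
    return prop.major > requiredMajor ||
           (prop.major == requiredMajor && prop.minor >= requiredMinor);
}

int main() {
    std::printf("fp16-output GEMM tests would %s on this device\n",
                deviceSupportsAtLeast(5, 3) ? "run" : "be skipped");
    return 0;
}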
f66cc0470ea7506b54109ee711c2f1fefde86c22.cu
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

#include "testlayers.h"
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>
#include <graph/Context.h>
#include <graph/Node.h>
#include <graph/Variable.h>
#include <graph/VariableSpace.h>
#include <ops/specials_cuda.h>
#include <helpers/TAD.h>
#include <helpers/MmulHelper.h>
#include <cuda.h>

using namespace sd;
using namespace sd::graph;

class CudaBasicsTests2 : public testing::Test {
public:
};

TEST_F(CudaBasicsTests2, test_devices_1) {
    auto caps = Environment::getInstance().capabilities();
    ASSERT_FALSE(caps.empty());
}

//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests2, mmulMxM_1) {
    const Nd4jLong M = 3;
    const Nd4jLong K = 4;
    const Nd4jLong N = 5;

    NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32);
    NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32);
    NDArray c('f', {M,N}, sd::DataType::FLOAT32);
    NDArray exp('f', {M,N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32);

    sd::MmulHelper::mmul(&a, &b, &c, 1., 0.);
    // c.printIndexedBuffer();

    ASSERT_TRUE(c.equalsTo(&exp));
}

//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests2, mmulMxM_2) {
    const Nd4jLong M = 3;
    const Nd4jLong K = 4;
    const Nd4jLong N = 5;

    NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE);
    NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE);
    NDArray c('f', {M,N}, sd::DataType::DOUBLE);
    NDArray exp('f', {M,N}, {-1.6, -0.7, 0.2, -0.8, 0.1, 1., -0., 0.9, 1.8, 0.8, 1.7, 2.6, 1.6, 2.5, 3.4}, sd::DataType::DOUBLE);

    sd::MmulHelper::mmul(&a, &b, &c, 1., 0.);

    ASSERT_TRUE(c.equalsTo(&exp));
}

//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests2, mmulMxM_3) {
    const Nd4jLong M = 3;
    const Nd4jLong K = 4;
    const Nd4jLong N = 5;

    NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE);
    NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE);
    NDArray c('f', {M,N}, sd::DataType::DOUBLE);
    NDArray exp('f', {M,N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::DOUBLE);

    sd::MmulHelper::mmul(&a, &b, &c, 1., 0.);

    ASSERT_TRUE(c.equalsTo(&exp));
}

//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests2, mmulMxM_4) {
    const Nd4jLong M = 3;
    const Nd4jLong K = 4;
    const Nd4jLong N = 5;

    NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE);
    NDArray b('f', {K,N},
{1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {0.1, 2.5, 4.9, 7.3, 9.7,0.3, 2.7, 5.1, 7.5, 9.9,0.5, 2.9, 5.3, 7.7, 10.1}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); // NDArray* pA = a.permute({1,0}); // NDArray* pB = b.permute({1,0}); // NDArray* pC = c.permute({1,0}); // sd::MmulHelper::mmul(pB, pA, pC, 1., 0.); // ASSERT_TRUE(c.equalsTo(&exp)); // delete pA; // delete pB; // delete pC; } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_5) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('f', {M,N}, sd::DataType::DOUBLE); NDArray exp('f', {M,N}, {-8.8, -4.3, 0.2, 8.6, 4.1, -0.4, -8.4, -3.9, 0.6, 8.2, 3.7, -0.8, -8.0, -3.5, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_6) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-1.6, -0.8, -0.0, 0.8, 1.6, -0.7, 0.1, 0.9, 1.7, 2.5, 0.2, 1.0, 1.8, 2.6, 3.4}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_7) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-1.9, 1.3, -0.7, 0.1, 0.5, -0.9, 0.3, 0.3, -0.9, 1.5, 0.1, -0.7, 1.3, -1.9, 2.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_8) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_9) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::FLOAT32); NDArray exp('c', {M,N}, {-8.8, 8.6, 
-8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_10) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printIndexedBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_11) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_12) { int devCnt = 0; cudaGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5) return; const Nd4jLong M = 4; const Nd4jLong K = 4; const Nd4jLong N = 4; NDArray a('f', {M,K}, {1.,2,3,4,5,6,7,8,9,2,3,2,1,0,4,7.}, sd::DataType::INT8); NDArray b('f', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-1,2,-2,3,-4,5,-6.}, sd::DataType::INT8); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {-16., -22., -23., -25., 30., -12., -38., -70., 20., 16., 18., 18., 22., -8., -28., -52.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_13) { int devCnt = 0; cudaGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::INT8); NDArray b('c', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::INT8); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {-109., -122., -135., 111., 120., 129., -121., -134., -147., 129., 144., 159., -130., -140., -150.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_14) { int devCnt = 0; cudaGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::INT8); NDArray b('c', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::INT8); NDArray c('c', {M,N}, sd::DataType::FLOAT32); NDArray exp('c', {M,N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., 
-90., -149., 115., -153., 173., -130.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_15) { int devCnt = 0; cudaGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_16) { int devCnt = 0; cudaGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_17) { int devCnt = 0; cudaGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('c', {M,N}, sd::DataType::FLOAT32); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_18) { int devCnt = 0; cudaGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5.3) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('f', {M,N}, sd::DataType::HALF); NDArray exp('f', {M,N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 1e-1)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_19) { int devCnt = 0; cudaGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5.3) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', 
{K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('f', {M,N}, sd::DataType::HALF); NDArray exp('f', {M,N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 1e-1)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_20) { int devCnt = 0; cudaGetDevice(&devCnt); if(Environment::getInstance().capabilities()[devCnt].first() < 5.3) return; const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('c', {M,N}, sd::DataType::HALF); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 1e-1)); } /* ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_21) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::INT8); NDArray b('c', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_22) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::FLOAT32); NDArray b('c', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('c', {M,N}, sd::DataType::FLOAT32); NDArray exp('c', {M,N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_23) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_24) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, 
sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_25) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::HALF); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_26) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; // 3x4 * 4x5 = 3x5 NDArray a('c', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::INT64); NDArray b('c', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_27) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_28) { const Nd4jLong M = 3; const Nd4jLong K = 4; const Nd4jLong N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {-1.6, -0.7, 0.2, -0.8, 0.1, 1., -0., 0.9, 1.8, 0.8, 1.7, 2.6, 1.6, 2.5, 3.4}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } */ ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_1) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray x('f', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {0.1, 0.3, 0.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_2) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray x('f', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, 
{-1.6, -0.7, 0.2}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_3) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray x('c', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-1.6, -0.7, 0.2}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_4) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray x('c', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {-1.6, -0.7, 0.2}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_5) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray x('c', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {0.1, 0.3, 0.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_6) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray temp('f', {M,N,5}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {5.5, 5.1, 4.7}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_7) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {M,N,5}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {5.1, 3.3, 1.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_8) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {N,M,5}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(4, {1,2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {6.2, 4.5, 1.7}, sd::DataType::DOUBLE); 
sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_9) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(3, {0,1}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {1.5, 1.8, 1.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_10) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0,1}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_11) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('c', {5,N,M}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(13, {0,2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-12.1, -10.9, -9.7}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_12) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('c', {5,N,M}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(10, {0,2}); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {3.3, 3.3, 3.3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_13) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0,1}, true); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } 
////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_14) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('c', {5,N,M}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(10, {0,2}, true); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {3.3, 3.3, 3.3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_15) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0,1}); NDArray y = temp(17, {0,2}); NDArray exp('f', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_16) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray temp1('c', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0,1}); NDArray y = temp1(17, {0,2}); NDArray exp('c', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_17) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0,1}); NDArray y = temp(17, {0,2}, true); // y.printShapeInfo(); NDArray exp('f', {1,M,1}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_18) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray temp1('c', {5,M,N}, 
{16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0,1},true); NDArray y = temp1(17, {0,2},true); NDArray exp('c', {1,M,1}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// /* TEST_F(CudaBasicsTests2, mmulMxV_19) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray x('f', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {0.1, 0.3, 0.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_20) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray x('f', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {-1.6, -0.7, 0.2}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_21) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray x('c', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('c', {M}, sd::DataType::FLOAT32); NDArray exp('c', {M}, {-1.6, -0.7, 0.2}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_22) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray temp('f', {M,N,5}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2}); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {5.5, 5.1, 4.7}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_23) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(3, {0,1}); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {1.5, 1.8, 1.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_24) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray temp('f', {M,N,5}, 
{16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2},true); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {5.5, 5.1, 4.7}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_25) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('f', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(3, {0,1}, true); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {1.5, 1.8, 1.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_26) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray temp1('c', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::FLOAT32); NDArray x = temp(2, {0,1}); NDArray y = temp1(17, {0,2}); NDArray exp('c', {M}, {-0.3, 0.3, 0.9}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_27) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray temp1('c', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::FLOAT32); NDArray x = temp(2, {0,1},true); NDArray y = temp1(17, {0,2},true); NDArray exp('c', {1,M,1}, {-0.3, 0.3, 0.9}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_28) { const Nd4jLong M = 3; const Nd4jLong N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray temp('f', {M,N,5}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2}); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {5.1, 3.3, 1.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } 
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests2, mmulDot_1) {

    const Nd4jLong N = 4;

    NDArray x('c', {N}, {1, 2, 3, 4}, sd::DataType::INT32);
    NDArray y('f', {N}, {0.1, 0.2, 0.3, 0.4}, sd::DataType::FLOAT32);
    NDArray z(sd::DataType::DOUBLE);
    NDArray exp('c', {}, {3}, sd::DataType::DOUBLE);

    sd::MmulHelper::mmul(&x, &y, &z);
    ASSERT_TRUE(z.equalsTo(&exp));
}

//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests2, mmulDot_2) {

    const Nd4jLong N = 4;

    NDArray x('c', {1,1,N}, {1, 2, 3, 4}, sd::DataType::INT32);
    NDArray y('f', {1,1,N,1,1,1}, {0.1, 0.2, 0.3, 0.4}, sd::DataType::FLOAT32);
    NDArray z(sd::DataType::DOUBLE);
    NDArray exp('c', {}, {3}, sd::DataType::DOUBLE);

    sd::MmulHelper::mmul(&x, &y, &z);
    ASSERT_TRUE(z.equalsTo(&exp));
}

//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests2, mmulDot_3) {

    const Nd4jLong N = 4;

    NDArray xBig('c', {4,2}, {1, 0, 2, 0, 3, 0, 4, 0}, sd::DataType::INT32);
    NDArray yBig('c', {4,3}, {0.1, 0, 0, 0.2, 0, 0, 0.3, 0, 0, 0.4, 0, 0}, sd::DataType::FLOAT32);
    NDArray x = xBig(0, {1}, true);
    NDArray y = yBig(0, {1}, true);
    NDArray z(sd::DataType::DOUBLE);
    NDArray exp('c', {}, {3}, sd::DataType::DOUBLE);

    sd::MmulHelper::mmul(&x, &y, &z);
    ASSERT_TRUE(z.equalsTo(&exp));
}

//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests2, mmulDot_4) {

    const Nd4jLong N = 4;

    NDArray xBig('f', {4,2}, {1, 2, 3, 4, 0, 0, 0, 0}, sd::DataType::INT32);
    NDArray yBig('c', {4,3}, {0.1, 0, 0, 0.2, 0, 0, 0.3, 0, 0, 0.4, 0, 0}, sd::DataType::FLOAT32);
    NDArray x = xBig(0, {1}, true);
    NDArray y = yBig(0, {1});
    NDArray z(sd::DataType::DOUBLE);
    NDArray exp('c', {}, {3}, sd::DataType::DOUBLE);

    sd::MmulHelper::mmul(&x, &y, &z);
    ASSERT_TRUE(z.equalsTo(&exp));
}
*/
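// Illustration (hypothetical helper, not part of the original test suite): the
// row-major ('c') expectations in the MxV tests above are plain row-by-vector dot
// products. A minimal host-side sketch for the mmulMxV_4 inputs, assuming only
// standard C++, reproduces exp = {-1.6, -0.7, 0.2}:
static void mmulMxV_rowMajor_reference_sketch() {

    const int M = 3, N = 4;
    const double a[M * N] = {1.2, 1.1, 1.0, 0.9,   // row-major layout, as in NDArray('c', {M,N}, ...)
                             0.8, 0.7, 0.5, 0.4,
                             0.3, 0.2, 0.1, 0.0};
    const double x[N] = {1, -2, 3, -4};

    double y[M];
    for (int i = 0; i < M; ++i) {
        y[i] = 0;
        for (int j = 0; j < N; ++j)
            y[i] += a[i * N + j] * x[j];           // yields y = {-1.6, -0.7, 0.2}
    }
    (void) y;                                      // reference values only, nothing is asserted here
}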
3df8968f5a7b6c7586929af7749b62033475da78.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPBlas.h> #include <ATen/hip/HIPContext.h> #include <ATen/native/ConvUtils.h> #include <ATen/native/hip/vol2col.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/empty_like.h> #include <ATen/ops/sum.h> #include <ATen/ops/ones.h> #include <ATen/ops/slow_conv_transpose3d_native.h> #endif namespace at::native { namespace { static inline void slow_conv_transpose3d_shape_check( const Tensor& input, const Tensor& grad_output, const Tensor& weight, const Tensor& bias, int kernel_depth, int kernel_width, int kernel_height, int stride_depth, int stride_width, int stride_height, int padding_depth, int padding_width, int padding_height, int dilation_depth, int dilation_width, int dilation_height, int output_padding_depth, int output_padding_width, int output_padding_height, int weight_nullable) { TORCH_CHECK( input.numel() != 0 && (input.dim() == 4 || input.dim() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", input.sizes()); TORCH_CHECK( stride_depth > 0 && stride_width > 0 && stride_height > 0, "stride should be greater than zero, but got stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width); TORCH_CHECK( dilation_depth > 0 && dilation_width > 0 && dilation_height > 0, "dilation should be greater than zero, but got dilation_depth: ", dilation_depth, ", dilation_height: ", dilation_height, ", dilation_width: ", dilation_width); TORCH_CHECK( (output_padding_depth < stride_depth || output_padding_depth < dilation_depth) && (output_padding_width < stride_width || output_padding_width < dilation_width) && (output_padding_height < stride_height || output_padding_height < dilation_height), "output padding must be smaller than either stride or dilation,", " but got output_padding_depth: ", output_padding_depth, " output_padding_height: ", output_padding_height, " output_padding_width: ", output_padding_width, " stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width, " dilation_depth: ", dilation_depth, " dilation_height: ", dilation_height, " dilation_width: ", dilation_width); // number of input & output planes and kernel size is indirectly defined by // the weight tensor if (weight.defined()) { TORCH_CHECK( weight.numel() != 0 && weight.dim() == 5, "non-empty 5D (n_output_plane x n_input_plane ", "x kernel_depth x kernel_height x kernel_width) tensor ", "expected for weight, but got: ", weight.sizes()); if (bias.defined()) { check_dim_size(bias, 1, 0, weight.size(1)); } } else if (!weight_nullable) { AT_ERROR("weight tensor is expected to be non-nullable"); } int ndim = input.dim(); int dimf = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (ndim == 5) { dimf++; dimd++; dimh++; dimw++; } if (weight.defined()) { const int64_t n_input_plane = weight.size(0); check_dim_size(input, ndim, dimf, n_input_plane); } int64_t input_width = input.size(dimw); int64_t input_height = input.size(dimh); int64_t input_depth = input.size(dimd); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; if (output_depth < 1 || output_width < 1 || output_height < 1) { AT_ERROR( "Given input size per channel: (", input_depth, " x ", input_height, " x ", input_width, "). Calculated output size per channel: (", output_depth, " x ", output_height, " x ", output_width, "). Output size is too small"); } if (grad_output.defined()) { if (weight.defined()) { const int64_t n_output_plane = weight.size(1); check_dim_size(grad_output, ndim, dimf, n_output_plane); } else if (bias.defined()) { const int64_t n_output_plane = bias.size(0); check_dim_size(grad_output, ndim, dimf, n_output_plane); } check_dim_size(grad_output, ndim, dimd, output_depth); check_dim_size(grad_output, ndim, dimh, output_height); check_dim_size(grad_output, ndim, dimw, output_width); } } void slow_conv_transpose3d_out_cuda_template( Tensor& output, const Tensor& input_, const Tensor& weight_, IntArrayRef kernel_size, const Tensor& bias_, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2}, weight_arg{weight_, "weight", 3}, bias_arg{bias_, "bias", 4}; checkAllSameGPU( "slow_conv_transpose3d_out_cuda", {input_arg, output_arg, weight_arg, bias_arg}); slow_conv_transpose3d_shape_check( input_, Tensor(), weight_, bias_, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); Tensor input = input_.contiguous(); Tensor weight = weight_.contiguous(); Tensor bias = bias_.defined() ? 
bias_.contiguous() : bias_; int is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output output.resize_( {batch_size, n_output_plane, output_depth, output_height, output_width}); // Create temporary columns Tensor columns = at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}, input.options()); // Define a buffer of ones, for bias accumulation Tensor ones = bias.defined() ? at::ones({output_depth, output_height, output_width}, input_.options()) : Tensor(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; // Helpers Tensor input_n; Tensor output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: input_n = input.select(0, elt); output_n = output.select(0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); int64_t n = columns.size(1); int64_t k = weight.size(0); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( 'n', 't', n, m, k, static_cast<scalar_t>(1), input_n.const_data_ptr<scalar_t>(), n, weight.const_data_ptr<scalar_t>(), m, static_cast<scalar_t>(0), columns.mutable_data_ptr<scalar_t>(), n); // Unpack columns back into input: at::native::col2vol<scalar_t, accscalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), columns.data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, output_n.data_ptr<scalar_t>()); // Do Bias after: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m_ = n_output_plane; int64_t n_ = output_depth * output_height * output_width; int64_t k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) if (bias.defined()) { at::cuda::blas::gemm<scalar_t>( 't', 'n', n_, m_, k_, static_cast<scalar_t>(1), ones.const_data_ptr<scalar_t>(), k_, bias.const_data_ptr<scalar_t>(), k_, static_cast<scalar_t>(1), output_n.mutable_data_ptr<scalar_t>(), n_); } } // Resize output if (is_batch) { output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void slow_conv_transpose3d_backward_out_cuda_template( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_input, const Tensor& weight_, 
IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, weight_arg{weight_, "weight", 3}, grad_input_arg{grad_input, "grad_input", 4}; checkAllSameGPU( "slow_conv_transpose3d_backward_out_cuda", {input_arg, grad_output_arg, weight_arg, grad_input_arg}); slow_conv_transpose3d_shape_check( input_, grad_output_, weight_, Tensor(), kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); Tensor weight = weight_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output grad_input.resize_( {batch_size, n_input_plane, input_depth, input_height, input_width}); // Create temporary columns bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 || stride_depth != 1 || stride_height != 1 || stride_width != 1 || dilation_depth != 1 || dilation_height != 1 || dilation_width != 1 || padding_depth != 0 || padding_height != 0 || padding_width != 0); Tensor grad_columns = need_columns ? 
at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}, input.options()) : Tensor(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] { // Helpers Tensor grad_input_n; Tensor grad_output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per sample: grad_input_n = grad_input.select(0, elt); grad_output_n = grad_output.select(0, elt); if (need_columns) { // Extract columns: at::native::vol2col<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output_n.const_data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, grad_columns.mutable_data_ptr<scalar_t>()); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(0); int64_t n = input_depth * input_height * input_width; int64_t k = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) auto gemm_in_ptr = need_columns ? grad_columns.const_data_ptr<scalar_t>() : grad_output_n.const_data_ptr<scalar_t>(); at::cuda::blas::gemm<scalar_t>( 'n', 'n', n, m, k, static_cast<scalar_t>(1), gemm_in_ptr, n, weight.const_data_ptr<scalar_t>(), k, static_cast<scalar_t>(0), grad_input_n.mutable_data_ptr<scalar_t>(), n); } // Resize output if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); grad_input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void slow_conv_transpose3d_acc_grad_parameters_cuda( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_weight, Tensor& grad_bias, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, int scale_) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, grad_weight_arg{grad_weight, "grad_weight", 3}, grad_bias_arg{grad_bias, "grad_bias", 4}; 
checkAllSameGPU( "slow_conv_transpose3d_acc_grad_parameters_cuda", {input_arg, grad_output_arg, grad_weight_arg, grad_bias_arg}); slow_conv_transpose3d_shape_check( input_, grad_output_, grad_weight, grad_bias, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 1); int n_output_plane; if (grad_weight.defined()) { n_output_plane = grad_weight.size(1); } else if (grad_bias.defined()) { n_output_plane = grad_bias.size(0); } else { return; } if (grad_weight.defined()) { TORCH_CHECK( grad_weight.is_contiguous(), "grad_weight needs to be contiguous"); } if (grad_bias.defined()) { TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous"); } Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Create temporary columns bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 || stride_depth != 1 || stride_height != 1 || stride_width != 1 || dilation_depth != 1 || dilation_height != 1 || dilation_width != 1 || padding_depth != 0 || padding_height != 0 || padding_width != 0); Tensor columns = need_columns ? 
at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}, input.options()) : Tensor(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_acc_grad_parameters_cuda", [&] { // Helpers Tensor input_n; Tensor grad_output_n; scalar_t scale = static_cast<scalar_t>(scale_); // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: grad_output_n = grad_output.select(0, elt); // Do Weight: if (grad_weight.defined()) { // Matrix mulitply per output: input_n = input.select(0, elt); if (need_columns) { // Extract columns: at::native::vol2col<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output_n.const_data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, columns.mutable_data_ptr<scalar_t>()); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t n = n_output_plane * kernel_width * kernel_height * kernel_depth; int64_t m = input_n.size(0); // n_input_plane int64_t k = input_depth * input_height * input_width; // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) auto gemm_in_ptr = need_columns ? columns.const_data_ptr<scalar_t>() : grad_output_n.const_data_ptr<scalar_t>(); at::cuda::blas::gemm<scalar_t>( 't', 'n', n, m, k, scale, gemm_in_ptr, k, input_n.const_data_ptr<scalar_t>(), k, static_cast<scalar_t>(1), grad_weight.mutable_data_ptr<scalar_t>(), n); } } if (grad_bias.defined()) { at::sum_out(grad_bias, grad_output, IntArrayRef{0, 2, 3, 4}); } // Resize if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {input.size(1), input_depth, input_height, input_width}); } }); } } // namespace Tensor& slow_conv_transpose3d_out_cuda(const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor& output) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; slow_conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation); return output; } Tensor slow_conv_transpose3d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); slow_conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation); return output; } std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, 
Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias) { if (grad_input.defined()) { slow_conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { slow_conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor&, Tensor&, Tensor&>( grad_input, grad_weight, grad_bias); } std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, std::array<bool, 3> output_mask) { Tensor grad_input; Tensor grad_weight; Tensor grad_bias; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } else { grad_input = Tensor(); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } else { grad_weight = Tensor(); } if (output_mask[2]) { grad_bias = at::empty({0}, grad_output.options()); } else { grad_bias = Tensor(); } if (grad_input.defined()) { slow_conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { slow_conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias); } REGISTER_CUDA_DISPATCH(slow_conv_transpose3d_backward_stub, &slow_conv_transpose3d_backward_cuda); } // namespace at::native
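// Illustration (hypothetical helper, not part of the original file): every output
// size computed in the functions above follows the standard transposed-convolution
// relation
//   out = (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 + output_padding.
// For example, in = 4, stride = 2, padding = 1, dilation = 1, kernel = 3 and
// output_padding = 1 gives (4 - 1) * 2 - 2 + (3 - 1) + 1 + 1 = 8.
static inline int64_t conv_transpose_out_size_sketch(
    int64_t in, int64_t stride, int64_t padding,
    int64_t dilation, int64_t kernel, int64_t output_padding) {
  return (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 + output_padding;
}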
3df8968f5a7b6c7586929af7749b62033475da78.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDABlas.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/native/ConvUtils.h> #include <ATen/native/cuda/vol2col.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/empty_like.h> #include <ATen/ops/sum.h> #include <ATen/ops/ones.h> #include <ATen/ops/slow_conv_transpose3d_native.h> #endif namespace at::native { namespace { static inline void slow_conv_transpose3d_shape_check( const Tensor& input, const Tensor& grad_output, const Tensor& weight, const Tensor& bias, int kernel_depth, int kernel_width, int kernel_height, int stride_depth, int stride_width, int stride_height, int padding_depth, int padding_width, int padding_height, int dilation_depth, int dilation_width, int dilation_height, int output_padding_depth, int output_padding_width, int output_padding_height, int weight_nullable) { TORCH_CHECK( input.numel() != 0 && (input.dim() == 4 || input.dim() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", input.sizes()); TORCH_CHECK( stride_depth > 0 && stride_width > 0 && stride_height > 0, "stride should be greater than zero, but got stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width); TORCH_CHECK( dilation_depth > 0 && dilation_width > 0 && dilation_height > 0, "dilation should be greater than zero, but got dilation_depth: ", dilation_depth, ", dilation_height: ", dilation_height, ", dilation_width: ", dilation_width); TORCH_CHECK( (output_padding_depth < stride_depth || output_padding_depth < dilation_depth) && (output_padding_width < stride_width || output_padding_width < dilation_width) && (output_padding_height < stride_height || output_padding_height < dilation_height), "output padding must be smaller than either stride or dilation,", " but got output_padding_depth: ", output_padding_depth, " output_padding_height: ", output_padding_height, " output_padding_width: ", output_padding_width, " stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width, " dilation_depth: ", dilation_depth, " dilation_height: ", dilation_height, " dilation_width: ", dilation_width); // number of input & output planes and kernel size is indirectly defined by // the weight tensor if (weight.defined()) { TORCH_CHECK( weight.numel() != 0 && weight.dim() == 5, "non-empty 5D (n_output_plane x n_input_plane ", "x kernel_depth x kernel_height x kernel_width) tensor ", "expected for weight, but got: ", weight.sizes()); if (bias.defined()) { check_dim_size(bias, 1, 0, weight.size(1)); } } else if (!weight_nullable) { AT_ERROR("weight tensor is expected to be non-nullable"); } int ndim = input.dim(); int dimf = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (ndim == 5) { dimf++; dimd++; dimh++; dimw++; } if (weight.defined()) { const int64_t n_input_plane = weight.size(0); check_dim_size(input, ndim, dimf, n_input_plane); } int64_t input_width = input.size(dimw); int64_t input_height = input.size(dimh); int64_t input_depth = input.size(dimd); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * 
(kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; if (output_depth < 1 || output_width < 1 || output_height < 1) { AT_ERROR( "Given input size per channel: (", input_depth, " x ", input_height, " x ", input_width, "). Calculated output size per channel: (", output_depth, " x ", output_height, " x ", output_width, "). Output size is too small"); } if (grad_output.defined()) { if (weight.defined()) { const int64_t n_output_plane = weight.size(1); check_dim_size(grad_output, ndim, dimf, n_output_plane); } else if (bias.defined()) { const int64_t n_output_plane = bias.size(0); check_dim_size(grad_output, ndim, dimf, n_output_plane); } check_dim_size(grad_output, ndim, dimd, output_depth); check_dim_size(grad_output, ndim, dimh, output_height); check_dim_size(grad_output, ndim, dimw, output_width); } } void slow_conv_transpose3d_out_cuda_template( Tensor& output, const Tensor& input_, const Tensor& weight_, IntArrayRef kernel_size, const Tensor& bias_, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2}, weight_arg{weight_, "weight", 3}, bias_arg{bias_, "bias", 4}; checkAllSameGPU( "slow_conv_transpose3d_out_cuda", {input_arg, output_arg, weight_arg, bias_arg}); slow_conv_transpose3d_shape_check( input_, Tensor(), weight_, bias_, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); Tensor input = input_.contiguous(); Tensor weight = weight_.contiguous(); Tensor bias = bias_.defined() ? 
bias_.contiguous() : bias_; int is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output output.resize_( {batch_size, n_output_plane, output_depth, output_height, output_width}); // Create temporary columns Tensor columns = at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}, input.options()); // Define a buffer of ones, for bias accumulation Tensor ones = bias.defined() ? at::ones({output_depth, output_height, output_width}, input_.options()) : Tensor(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; // Helpers Tensor input_n; Tensor output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: input_n = input.select(0, elt); output_n = output.select(0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); int64_t n = columns.size(1); int64_t k = weight.size(0); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( 'n', 't', n, m, k, static_cast<scalar_t>(1), input_n.const_data_ptr<scalar_t>(), n, weight.const_data_ptr<scalar_t>(), m, static_cast<scalar_t>(0), columns.mutable_data_ptr<scalar_t>(), n); // Unpack columns back into input: at::native::col2vol<scalar_t, accscalar_t>( at::cuda::getCurrentCUDAStream(), columns.data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, output_n.data_ptr<scalar_t>()); // Do Bias after: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m_ = n_output_plane; int64_t n_ = output_depth * output_height * output_width; int64_t k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) if (bias.defined()) { at::cuda::blas::gemm<scalar_t>( 't', 'n', n_, m_, k_, static_cast<scalar_t>(1), ones.const_data_ptr<scalar_t>(), k_, bias.const_data_ptr<scalar_t>(), k_, static_cast<scalar_t>(1), output_n.mutable_data_ptr<scalar_t>(), n_); } } // Resize output if (is_batch) { output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void slow_conv_transpose3d_backward_out_cuda_template( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_input, const Tensor& weight_, IntArrayRef 
kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, weight_arg{weight_, "weight", 3}, grad_input_arg{grad_input, "grad_input", 4}; checkAllSameGPU( "slow_conv_transpose3d_backward_out_cuda", {input_arg, grad_output_arg, weight_arg, grad_input_arg}); slow_conv_transpose3d_shape_check( input_, grad_output_, weight_, Tensor(), kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); Tensor weight = weight_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output grad_input.resize_( {batch_size, n_input_plane, input_depth, input_height, input_width}); // Create temporary columns bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 || stride_depth != 1 || stride_height != 1 || stride_width != 1 || dilation_depth != 1 || dilation_height != 1 || dilation_width != 1 || padding_depth != 0 || padding_height != 0 || padding_width != 0); Tensor grad_columns = need_columns ? 
at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}, input.options()) : Tensor(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] { // Helpers Tensor grad_input_n; Tensor grad_output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per sample: grad_input_n = grad_input.select(0, elt); grad_output_n = grad_output.select(0, elt); if (need_columns) { // Extract columns: at::native::vol2col<scalar_t>( at::cuda::getCurrentCUDAStream(), grad_output_n.const_data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, grad_columns.mutable_data_ptr<scalar_t>()); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(0); int64_t n = input_depth * input_height * input_width; int64_t k = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) auto gemm_in_ptr = need_columns ? grad_columns.const_data_ptr<scalar_t>() : grad_output_n.const_data_ptr<scalar_t>(); at::cuda::blas::gemm<scalar_t>( 'n', 'n', n, m, k, static_cast<scalar_t>(1), gemm_in_ptr, n, weight.const_data_ptr<scalar_t>(), k, static_cast<scalar_t>(0), grad_input_n.mutable_data_ptr<scalar_t>(), n); } // Resize output if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); grad_input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void slow_conv_transpose3d_acc_grad_parameters_cuda( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_weight, Tensor& grad_bias, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, int scale_) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, grad_weight_arg{grad_weight, "grad_weight", 3}, grad_bias_arg{grad_bias, "grad_bias", 4}; checkAllSameGPU( 
"slow_conv_transpose3d_acc_grad_parameters_cuda", {input_arg, grad_output_arg, grad_weight_arg, grad_bias_arg}); slow_conv_transpose3d_shape_check( input_, grad_output_, grad_weight, grad_bias, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 1); int n_output_plane; if (grad_weight.defined()) { n_output_plane = grad_weight.size(1); } else if (grad_bias.defined()) { n_output_plane = grad_bias.size(0); } else { return; } if (grad_weight.defined()) { TORCH_CHECK( grad_weight.is_contiguous(), "grad_weight needs to be contiguous"); } if (grad_bias.defined()) { TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous"); } Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Create temporary columns bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 || stride_depth != 1 || stride_height != 1 || stride_width != 1 || dilation_depth != 1 || dilation_height != 1 || dilation_width != 1 || padding_depth != 0 || padding_height != 0 || padding_width != 0); Tensor columns = need_columns ? 
at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}, input.options()) : Tensor(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_acc_grad_parameters_cuda", [&] { // Helpers Tensor input_n; Tensor grad_output_n; scalar_t scale = static_cast<scalar_t>(scale_); // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: grad_output_n = grad_output.select(0, elt); // Do Weight: if (grad_weight.defined()) { // Matrix mulitply per output: input_n = input.select(0, elt); if (need_columns) { // Extract columns: at::native::vol2col<scalar_t>( at::cuda::getCurrentCUDAStream(), grad_output_n.const_data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, columns.mutable_data_ptr<scalar_t>()); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t n = n_output_plane * kernel_width * kernel_height * kernel_depth; int64_t m = input_n.size(0); // n_input_plane int64_t k = input_depth * input_height * input_width; // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) auto gemm_in_ptr = need_columns ? columns.const_data_ptr<scalar_t>() : grad_output_n.const_data_ptr<scalar_t>(); at::cuda::blas::gemm<scalar_t>( 't', 'n', n, m, k, scale, gemm_in_ptr, k, input_n.const_data_ptr<scalar_t>(), k, static_cast<scalar_t>(1), grad_weight.mutable_data_ptr<scalar_t>(), n); } } if (grad_bias.defined()) { at::sum_out(grad_bias, grad_output, IntArrayRef{0, 2, 3, 4}); } // Resize if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {input.size(1), input_depth, input_height, input_width}); } }); } } // namespace Tensor& slow_conv_transpose3d_out_cuda(const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor& output) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; slow_conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation); return output; } Tensor slow_conv_transpose3d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); slow_conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation); return output; } std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor& grad_input, 
Tensor& grad_weight, Tensor& grad_bias) { if (grad_input.defined()) { slow_conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { slow_conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor&, Tensor&, Tensor&>( grad_input, grad_weight, grad_bias); } std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, std::array<bool, 3> output_mask) { Tensor grad_input; Tensor grad_weight; Tensor grad_bias; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } else { grad_input = Tensor(); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } else { grad_weight = Tensor(); } if (output_mask[2]) { grad_bias = at::empty({0}, grad_output.options()); } else { grad_bias = Tensor(); } if (grad_input.defined()) { slow_conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { slow_conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias); } REGISTER_CUDA_DISPATCH(slow_conv_transpose3d_backward_stub, &slow_conv_transpose3d_backward_cuda); } // namespace at::native
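The forward, backward and accGradParameters paths above all recompute the transposed-convolution output extent from the same expression. As a small illustrative sketch (the helper name conv_transpose_out_size is an assumption, not part of the file above), the formula can be checked in isolation:

#include <cstdint>
#include <cstdio>

// Output extent of a transposed convolution along one dimension; this is the
// same expression used above for output_depth, output_height and output_width.
static int64_t conv_transpose_out_size(int64_t in, int64_t stride, int64_t padding,
                                       int64_t dilation, int64_t kernel,
                                       int64_t output_padding)
{
  return (in - 1) * stride - 2 * padding + (dilation * (kernel - 1) + 1) + output_padding;
}

int main()
{
  // Example: 8 input frames, stride 2, padding 1, dilation 1, kernel 3,
  // output_padding 1  ->  (8-1)*2 - 2*1 + (1*(3-1)+1) + 1 = 16
  std::printf("%lld\n", (long long)conv_transpose_out_size(8, 2, 1, 1, 3, 1));
  return 0;
}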
2684ceb5b799a3b12add3cf9dc5851213906fa04.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>

#include "clang/cuda.h"

__global__ void add(float *a, float *b, float *c, int n) {
  int i = __builtin_ptx_read_ctaid_x() * __builtin_ptx_read_ntid_x() +
          __builtin_ptx_read_tid_x();
  int j = __builtin_ptx_read_ctaid_y() * __builtin_ptx_read_ntid_y() +
          __builtin_ptx_read_tid_y();
  if (i < n && j < n) {
    int idx = i * n + j;
    c[idx] = a[idx] + b[idx];
  }
}
2684ceb5b799a3b12add3cf9dc5851213906fa04.cu
#include <stdint.h>

#include "clang/cuda.h"

__global__ void add(float *a, float *b, float *c, int n) {
  int i = __builtin_ptx_read_ctaid_x() * __builtin_ptx_read_ntid_x() +
          __builtin_ptx_read_tid_x();
  int j = __builtin_ptx_read_ctaid_y() * __builtin_ptx_read_ntid_y() +
          __builtin_ptx_read_tid_y();
  if (i < n && j < n) {
    int idx = i * n + j;
    c[idx] = a[idx] + b[idx];
  }
}
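Both the HIP and CUDA variants of this add kernel contain only device code. A minimal host-side launcher is sketched below purely for illustration; the wrapper name launch_add and the 16x16 block shape are assumptions, not part of the files above.

#include <cuda_runtime.h>
#include <vector>

__global__ void add(float *a, float *b, float *c, int n);  // kernel defined in the file above

// Hypothetical host wrapper: copies two n x n matrices to the device, launches
// add over a 2-D grid, and copies the elementwise sum back.
void launch_add(const std::vector<float>& a, const std::vector<float>& b,
                std::vector<float>& c, int n)
{
  const size_t bytes = (size_t)n * n * sizeof(float);
  float *d_a, *d_b, *d_c;
  cudaMalloc(&d_a, bytes);
  cudaMalloc(&d_b, bytes);
  cudaMalloc(&d_c, bytes);
  cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, b.data(), bytes, cudaMemcpyHostToDevice);

  dim3 block(16, 16);
  dim3 grid((n + block.x - 1) / block.x, (n + block.y - 1) / block.y);
  add<<<grid, block>>>(d_a, d_b, d_c, n);

  cudaMemcpy(c.data(), d_c, bytes, cudaMemcpyDeviceToHost);
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
}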
73e5e48f5c3c63bebce3f09bf401dfe3302768c1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHTensorMath.h" #include "THHGeneral.h" #include "THHApply.cuh" // Similar to TensorInfo as defined in THCReduceApplyUtils.h, but it preserves // the exact dimensionality of the tensor instead of flattening contiguous or // size-1 dimensions. This is required for scatter/gather kernels because we // need to know the indices along all dimensions. template <typename IndexType> struct SimpleTensorInfo { SimpleTensorInfo(THCState* state, THCudaTensor* t) { data = THCudaTensor_data(state, t); dims = THCudaTensor_nDimension(state, t); for (int d = 0; d < dims; d++) { sizes[d] = THCudaTensor_size(state, t, d); strides[d] = THCudaTensor_stride(state, t, d); } } float* data; IndexType sizes[MAX_CUTORCH_DIMS]; IndexType strides[MAX_CUTORCH_DIMS]; int dims; }; // Compute the offsets into the given tensors for a linear index. For the 't2' // tensor, dimension 'dim' is skipped. The tensors are assumed to have the same // size (with the exception of 't2' in dimension 'dim'). // This version uses a static number of dimensions. template <typename IndexType, int Dims> struct IndexToScatterGatherOffsets { static __device__ void compute( IndexType linearId, const int dim, const SimpleTensorInfo<IndexType>& index, IndexType* indexOffset, const SimpleTensorInfo<IndexType>& t1, IndexType* t1Offset, const SimpleTensorInfo<IndexType>& t2, IndexType* t2Offset) { for (int d = Dims - 1; d >= 0; d--) { IndexType curDimIndex = linearId % index.sizes[d]; *indexOffset += curDimIndex * index.strides[d]; *t1Offset += curDimIndex * t1.strides[d]; if (d != dim) { *t2Offset += curDimIndex * t2.strides[d]; } linearId /= index.sizes[d]; } } static __device__ void compute( IndexType linearId, const int dim, const SimpleTensorInfo<IndexType>& index, IndexType* indexOffset, const SimpleTensorInfo<IndexType>& t2, IndexType* t2Offset) { for (int d = Dims - 1; d >= 0; d--) { IndexType curDimIndex = linearId % index.sizes[d]; *indexOffset += curDimIndex * index.strides[d]; if (d != dim) { *t2Offset += curDimIndex * t2.strides[d]; } linearId /= index.sizes[d]; } } }; // Same as above but using a dynamic number of dimensions. 
template <typename IndexType> struct IndexToScatterGatherOffsets<IndexType, -1> { static __device__ void compute( IndexType linearId, const int dim, const SimpleTensorInfo<IndexType>& index, IndexType* indexOffset, const SimpleTensorInfo<IndexType>& t1, IndexType* t1Offset, const SimpleTensorInfo<IndexType>& t2, IndexType* t2Offset) { for (int d = index.dims - 1; d >= 0; d--) { IndexType curDimIndex = linearId % index.sizes[d]; *indexOffset += curDimIndex * index.strides[d]; *t1Offset += curDimIndex * t1.strides[d]; if (d != dim) { *t2Offset += curDimIndex * t2.strides[d]; } linearId /= index.sizes[d]; } } static __device__ void compute( IndexType linearId, const int dim, const SimpleTensorInfo<IndexType>& index, IndexType* indexOffset, const SimpleTensorInfo<IndexType>& t2, IndexType* t2Offset) { for (int d = index.dims - 1; d >= 0; d--) { IndexType curDimIndex = linearId % index.sizes[d]; *indexOffset += curDimIndex * index.strides[d]; if (d != dim) { *t2Offset += curDimIndex * t2.strides[d]; } linearId /= index.sizes[d]; } } }; template <typename IndexType, int Dims> __global__ void THCudaTensor_gatherKernel( SimpleTensorInfo<IndexType> tensor, SimpleTensorInfo<IndexType> src, SimpleTensorInfo<IndexType> index, const int dim, const IndexType totalElements) { for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x; linearId < totalElements; linearId += gridDim.x * blockDim.x) { IndexType tensorOffset = 0; IndexType srcOffset = 0; IndexType indexOffset = 0; IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim, index, &indexOffset, tensor, &tensorOffset, src, &srcOffset); IndexType indexValue = (IndexType)index.data[indexOffset] - 1; srcOffset += indexValue * src.strides[dim]; tensor.data[tensorOffset] = src.data[srcOffset]; } } #define RUN(TYPE, DIMS) \ hipLaunchKernelGGL(( THCudaTensor_gatherKernel<TYPE, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCudaTensor_gather(THCState* state, THCudaTensor *tensor, THCudaTensor *src, int dim, THCudaTensor *index) { THAssert(THCudaTensor_checkGPU(state, 3, tensor, src, index)); THArgCheck(THCudaTensor_nDimension(state, src) == THCudaTensor_nDimension(state, tensor), 2, "Input tensor must have same dimensions as output tensor"); THArgCheck(dim >= 0 && dim < THCudaTensor_nDimension(state, tensor), 3, "Index dimension is out of bounds"); THArgCheck(THCudaTensor_nDimension(state, index) == THCudaTensor_nDimension(state, src), 4, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCudaTensor_isSameSizeAs(state, tensor, index), 4, "Index tensor must have the same size as output tensor."); for (int d = 0; d < THCudaTensor_nDimension(state, tensor); d++) { if (d != dim) { THArgCheck(THCudaTensor_size(state, tensor, d) == THCudaTensor_size(state, src, d), 2, "Input tensor must have same size as output tensor apart from the specified dimension"); } } if (THCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) { return THArgCheck(false, 1, CUTORCH_DIM_WARNING); } const long totalElements = THCudaTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; if (!getApplyGrid(state, totalElements, grid)) { return THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaTensor* oldTensor = NULL; if (THC_overlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCudaTensor_newContiguous(state, tensor); } if (THC_canUse32BitIndexMath(state, tensor) && THC_canUse32BitIndexMath(state, src) && 
THC_canUse32BitIndexMath(state, index)) { SimpleTensorInfo<unsigned int> tensorInfo(state, tensor); SimpleTensorInfo<unsigned int> srcInfo(state, src); SimpleTensorInfo<unsigned int> indexInfo(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1); break; case 2: RUN(unsigned int, 2); break; case 3: RUN(unsigned int, 3); break; default: RUN(unsigned int, -1); break; } } else { SimpleTensorInfo<unsigned long> tensorInfo(state, tensor); SimpleTensorInfo<unsigned long> srcInfo(state, src); SimpleTensorInfo<unsigned long> indexInfo(state, index); RUN(unsigned long, -1) } if (oldTensor) { THCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor); THCudaTensor_free(state, tensor); tensor = oldTensor; } } #undef RUN template <typename IndexType, int Dims> __global__ void THCudaTensor_scatterKernel( SimpleTensorInfo<IndexType> tensor, SimpleTensorInfo<IndexType> src, SimpleTensorInfo<IndexType> index, const int dim, const IndexType totalElements) { for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x; linearId < totalElements; linearId += gridDim.x * blockDim.x) { IndexType tensorOffset = 0; IndexType srcOffset = 0; IndexType indexOffset = 0; IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim, index, &indexOffset, src, &srcOffset, tensor, &tensorOffset); IndexType indexValue = (IndexType)index.data[indexOffset] - 1; tensorOffset += indexValue * tensor.strides[dim]; tensor.data[tensorOffset] = src.data[srcOffset]; } } #define RUN(TYPE, DIMS) \ hipLaunchKernelGGL(( THCudaTensor_scatterKernel<TYPE, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCudaTensor_scatter(THCState* state, THCudaTensor *tensor, int dim, THCudaTensor *index, THCudaTensor *src) { THAssert(THCudaTensor_checkGPU(state, 3, tensor, src, index)); THArgCheck(dim >= 0 && dim < THCudaTensor_nDimension(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaTensor_nDimension(state, index) == THCudaTensor_nDimension(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCudaTensor_nDimension(state, src) == THCudaTensor_nDimension(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); THArgCheck(THCudaTensor_isSameSizeAs(state, src, index), 3, "Index tensor must have the same size as input tensor."); for (int d = 0; d < THCudaTensor_nDimension(state, tensor); d++) { if (d != dim) { THArgCheck(THCudaTensor_size(state, tensor, d) == THCudaTensor_size(state, src, d), 4, "Input tensor must have same size as output tensor apart from the specified dimension"); } } if (THCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) { return THArgCheck(false, 1, CUTORCH_DIM_WARNING); } const long totalElements = THCudaTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; if (!getApplyGrid(state, totalElements, grid)) { return THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaTensor* oldTensor = NULL; if (THC_overlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCudaTensor_newContiguous(state, tensor); } if (THC_canUse32BitIndexMath(state, tensor) && THC_canUse32BitIndexMath(state, src) && THC_canUse32BitIndexMath(state, index)) { SimpleTensorInfo<unsigned int> tensorInfo(state, tensor); SimpleTensorInfo<unsigned int> srcInfo(state, src); SimpleTensorInfo<unsigned int> indexInfo(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1); break; case 2: RUN(unsigned int, 2); break; case 3: RUN(unsigned int, 3); break; default: RUN(unsigned int, -1); break; } } else { SimpleTensorInfo<unsigned long> tensorInfo(state, tensor); SimpleTensorInfo<unsigned long> srcInfo(state, src); SimpleTensorInfo<unsigned long> indexInfo(state, index); RUN(unsigned long, -1) } if (oldTensor) { THCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor); THCudaTensor_free(state, tensor); tensor = oldTensor; } } #undef RUN template <typename IndexType, int Dims> __global__ void THCudaTensor_scatterFillKernel( SimpleTensorInfo<IndexType> tensor, SimpleTensorInfo<IndexType> index, float value, const int dim, const IndexType totalElements) { for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x; linearId < totalElements; linearId += gridDim.x * blockDim.x) { IndexType tensorOffset = 0; IndexType indexOffset = 0; IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim, index, &indexOffset, tensor, &tensorOffset); IndexType indexValue = (IndexType)index.data[indexOffset] - 1; tensorOffset += indexValue * tensor.strides[dim]; tensor.data[tensorOffset] = value; } } #define RUN(TYPE, DIMS) \ hipLaunchKernelGGL(( THCudaTensor_scatterFillKernel<TYPE, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \ tensorInfo, indexInfo, value, dim, (TYPE)totalElements); void THCudaTensor_scatterFill(THCState* state, THCudaTensor *tensor, int dim, THCudaTensor *index, float value) { THAssert(THCudaTensor_checkGPU(state, 2, tensor, index)); THArgCheck(dim >= 0 && dim < THCudaTensor_nDimension(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaTensor_nDimension(state, index) == THCudaTensor_nDimension(state, tensor), 3, "Index tensor must have same dimensions as output tensor"); if (THCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) { return THArgCheck(false, 1, CUTORCH_DIM_WARNING); } const long totalElements = THCudaTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; if (!getApplyGrid(state, totalElements, grid)) { return THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaTensor* oldTensor = NULL; if (THC_overlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCudaTensor_newContiguous(state, tensor); } if (THC_canUse32BitIndexMath(state, tensor) && THC_canUse32BitIndexMath(state, index)) { SimpleTensorInfo<unsigned int> tensorInfo(state, tensor); SimpleTensorInfo<unsigned int> indexInfo(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1); break; case 2: RUN(unsigned int, 2); break; case 3: RUN(unsigned int, 3); break; default: RUN(unsigned int, -1); break; } } else { SimpleTensorInfo<unsigned long> tensorInfo(state, tensor); SimpleTensorInfo<unsigned long> indexInfo(state, index); RUN(unsigned long, -1); } if (oldTensor) { THCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor); THCudaTensor_free(state, tensor); tensor = oldTensor; } } #undef RUN
73e5e48f5c3c63bebce3f09bf401dfe3302768c1.cu
#include "THCTensorMath.h" #include "THCGeneral.h" #include "THCApply.cuh" // Similar to TensorInfo as defined in THCReduceApplyUtils.h, but it preserves // the exact dimensionality of the tensor instead of flattening contiguous or // size-1 dimensions. This is required for scatter/gather kernels because we // need to know the indices along all dimensions. template <typename IndexType> struct SimpleTensorInfo { SimpleTensorInfo(THCState* state, THCudaTensor* t) { data = THCudaTensor_data(state, t); dims = THCudaTensor_nDimension(state, t); for (int d = 0; d < dims; d++) { sizes[d] = THCudaTensor_size(state, t, d); strides[d] = THCudaTensor_stride(state, t, d); } } float* data; IndexType sizes[MAX_CUTORCH_DIMS]; IndexType strides[MAX_CUTORCH_DIMS]; int dims; }; // Compute the offsets into the given tensors for a linear index. For the 't2' // tensor, dimension 'dim' is skipped. The tensors are assumed to have the same // size (with the exception of 't2' in dimension 'dim'). // This version uses a static number of dimensions. template <typename IndexType, int Dims> struct IndexToScatterGatherOffsets { static __device__ void compute( IndexType linearId, const int dim, const SimpleTensorInfo<IndexType>& index, IndexType* indexOffset, const SimpleTensorInfo<IndexType>& t1, IndexType* t1Offset, const SimpleTensorInfo<IndexType>& t2, IndexType* t2Offset) { for (int d = Dims - 1; d >= 0; d--) { IndexType curDimIndex = linearId % index.sizes[d]; *indexOffset += curDimIndex * index.strides[d]; *t1Offset += curDimIndex * t1.strides[d]; if (d != dim) { *t2Offset += curDimIndex * t2.strides[d]; } linearId /= index.sizes[d]; } } static __device__ void compute( IndexType linearId, const int dim, const SimpleTensorInfo<IndexType>& index, IndexType* indexOffset, const SimpleTensorInfo<IndexType>& t2, IndexType* t2Offset) { for (int d = Dims - 1; d >= 0; d--) { IndexType curDimIndex = linearId % index.sizes[d]; *indexOffset += curDimIndex * index.strides[d]; if (d != dim) { *t2Offset += curDimIndex * t2.strides[d]; } linearId /= index.sizes[d]; } } }; // Same as above but using a dynamic number of dimensions. 
template <typename IndexType> struct IndexToScatterGatherOffsets<IndexType, -1> { static __device__ void compute( IndexType linearId, const int dim, const SimpleTensorInfo<IndexType>& index, IndexType* indexOffset, const SimpleTensorInfo<IndexType>& t1, IndexType* t1Offset, const SimpleTensorInfo<IndexType>& t2, IndexType* t2Offset) { for (int d = index.dims - 1; d >= 0; d--) { IndexType curDimIndex = linearId % index.sizes[d]; *indexOffset += curDimIndex * index.strides[d]; *t1Offset += curDimIndex * t1.strides[d]; if (d != dim) { *t2Offset += curDimIndex * t2.strides[d]; } linearId /= index.sizes[d]; } } static __device__ void compute( IndexType linearId, const int dim, const SimpleTensorInfo<IndexType>& index, IndexType* indexOffset, const SimpleTensorInfo<IndexType>& t2, IndexType* t2Offset) { for (int d = index.dims - 1; d >= 0; d--) { IndexType curDimIndex = linearId % index.sizes[d]; *indexOffset += curDimIndex * index.strides[d]; if (d != dim) { *t2Offset += curDimIndex * t2.strides[d]; } linearId /= index.sizes[d]; } } }; template <typename IndexType, int Dims> __global__ void THCudaTensor_gatherKernel( SimpleTensorInfo<IndexType> tensor, SimpleTensorInfo<IndexType> src, SimpleTensorInfo<IndexType> index, const int dim, const IndexType totalElements) { for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x; linearId < totalElements; linearId += gridDim.x * blockDim.x) { IndexType tensorOffset = 0; IndexType srcOffset = 0; IndexType indexOffset = 0; IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim, index, &indexOffset, tensor, &tensorOffset, src, &srcOffset); IndexType indexValue = (IndexType)index.data[indexOffset] - 1; srcOffset += indexValue * src.strides[dim]; tensor.data[tensorOffset] = src.data[srcOffset]; } } #define RUN(TYPE, DIMS) \ THCudaTensor_gatherKernel<TYPE, DIMS> \ <<<grid, block, 0, THCState_getCurrentStream(state)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCudaTensor_gather(THCState* state, THCudaTensor *tensor, THCudaTensor *src, int dim, THCudaTensor *index) { THAssert(THCudaTensor_checkGPU(state, 3, tensor, src, index)); THArgCheck(THCudaTensor_nDimension(state, src) == THCudaTensor_nDimension(state, tensor), 2, "Input tensor must have same dimensions as output tensor"); THArgCheck(dim >= 0 && dim < THCudaTensor_nDimension(state, tensor), 3, "Index dimension is out of bounds"); THArgCheck(THCudaTensor_nDimension(state, index) == THCudaTensor_nDimension(state, src), 4, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCudaTensor_isSameSizeAs(state, tensor, index), 4, "Index tensor must have the same size as output tensor."); for (int d = 0; d < THCudaTensor_nDimension(state, tensor); d++) { if (d != dim) { THArgCheck(THCudaTensor_size(state, tensor, d) == THCudaTensor_size(state, src, d), 2, "Input tensor must have same size as output tensor apart from the specified dimension"); } } if (THCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) { return THArgCheck(false, 1, CUTORCH_DIM_WARNING); } const long totalElements = THCudaTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; if (!getApplyGrid(state, totalElements, grid)) { return THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaTensor* oldTensor = NULL; if (THC_overlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCudaTensor_newContiguous(state, tensor); } if (THC_canUse32BitIndexMath(state, tensor) && THC_canUse32BitIndexMath(state, src) && THC_canUse32BitIndexMath(state, index)) { 
SimpleTensorInfo<unsigned int> tensorInfo(state, tensor); SimpleTensorInfo<unsigned int> srcInfo(state, src); SimpleTensorInfo<unsigned int> indexInfo(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1); break; case 2: RUN(unsigned int, 2); break; case 3: RUN(unsigned int, 3); break; default: RUN(unsigned int, -1); break; } } else { SimpleTensorInfo<unsigned long> tensorInfo(state, tensor); SimpleTensorInfo<unsigned long> srcInfo(state, src); SimpleTensorInfo<unsigned long> indexInfo(state, index); RUN(unsigned long, -1) } if (oldTensor) { THCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor); THCudaTensor_free(state, tensor); tensor = oldTensor; } } #undef RUN template <typename IndexType, int Dims> __global__ void THCudaTensor_scatterKernel( SimpleTensorInfo<IndexType> tensor, SimpleTensorInfo<IndexType> src, SimpleTensorInfo<IndexType> index, const int dim, const IndexType totalElements) { for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x; linearId < totalElements; linearId += gridDim.x * blockDim.x) { IndexType tensorOffset = 0; IndexType srcOffset = 0; IndexType indexOffset = 0; IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim, index, &indexOffset, src, &srcOffset, tensor, &tensorOffset); IndexType indexValue = (IndexType)index.data[indexOffset] - 1; tensorOffset += indexValue * tensor.strides[dim]; tensor.data[tensorOffset] = src.data[srcOffset]; } } #define RUN(TYPE, DIMS) \ THCudaTensor_scatterKernel<TYPE, DIMS> \ <<<grid, block, 0, THCState_getCurrentStream(state)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCudaTensor_scatter(THCState* state, THCudaTensor *tensor, int dim, THCudaTensor *index, THCudaTensor *src) { THAssert(THCudaTensor_checkGPU(state, 3, tensor, src, index)); THArgCheck(dim >= 0 && dim < THCudaTensor_nDimension(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaTensor_nDimension(state, index) == THCudaTensor_nDimension(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCudaTensor_nDimension(state, src) == THCudaTensor_nDimension(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); THArgCheck(THCudaTensor_isSameSizeAs(state, src, index), 3, "Index tensor must have the same size as input tensor."); for (int d = 0; d < THCudaTensor_nDimension(state, tensor); d++) { if (d != dim) { THArgCheck(THCudaTensor_size(state, tensor, d) == THCudaTensor_size(state, src, d), 4, "Input tensor must have same size as output tensor apart from the specified dimension"); } } if (THCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) { return THArgCheck(false, 1, CUTORCH_DIM_WARNING); } const long totalElements = THCudaTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; if (!getApplyGrid(state, totalElements, grid)) { return THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaTensor* oldTensor = NULL; if (THC_overlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCudaTensor_newContiguous(state, tensor); } if (THC_canUse32BitIndexMath(state, tensor) && THC_canUse32BitIndexMath(state, src) && THC_canUse32BitIndexMath(state, index)) { SimpleTensorInfo<unsigned int> tensorInfo(state, tensor); SimpleTensorInfo<unsigned int> srcInfo(state, src); SimpleTensorInfo<unsigned int> indexInfo(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1); break; case 2: RUN(unsigned int, 2); break; case 3: RUN(unsigned int, 3); break; default: RUN(unsigned int, -1); break; } } else { SimpleTensorInfo<unsigned long> tensorInfo(state, tensor); SimpleTensorInfo<unsigned long> srcInfo(state, src); SimpleTensorInfo<unsigned long> indexInfo(state, index); RUN(unsigned long, -1) } if (oldTensor) { THCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor); THCudaTensor_free(state, tensor); tensor = oldTensor; } } #undef RUN template <typename IndexType, int Dims> __global__ void THCudaTensor_scatterFillKernel( SimpleTensorInfo<IndexType> tensor, SimpleTensorInfo<IndexType> index, float value, const int dim, const IndexType totalElements) { for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x; linearId < totalElements; linearId += gridDim.x * blockDim.x) { IndexType tensorOffset = 0; IndexType indexOffset = 0; IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim, index, &indexOffset, tensor, &tensorOffset); IndexType indexValue = (IndexType)index.data[indexOffset] - 1; tensorOffset += indexValue * tensor.strides[dim]; tensor.data[tensorOffset] = value; } } #define RUN(TYPE, DIMS) \ THCudaTensor_scatterFillKernel<TYPE, DIMS> \ <<<grid, block, 0, THCState_getCurrentStream(state)>>>( \ tensorInfo, indexInfo, value, dim, (TYPE)totalElements); void THCudaTensor_scatterFill(THCState* state, THCudaTensor *tensor, int dim, THCudaTensor *index, float value) { THAssert(THCudaTensor_checkGPU(state, 2, tensor, index)); THArgCheck(dim >= 0 && dim < THCudaTensor_nDimension(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaTensor_nDimension(state, index) == THCudaTensor_nDimension(state, tensor), 3, "Index tensor must have same dimensions as output tensor"); if (THCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) { return THArgCheck(false, 1, CUTORCH_DIM_WARNING); } const long totalElements = THCudaTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; if (!getApplyGrid(state, totalElements, grid)) { return THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaTensor* oldTensor = NULL; if (THC_overlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCudaTensor_newContiguous(state, tensor); } if (THC_canUse32BitIndexMath(state, tensor) && THC_canUse32BitIndexMath(state, index)) { SimpleTensorInfo<unsigned int> tensorInfo(state, tensor); SimpleTensorInfo<unsigned int> indexInfo(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1); break; case 2: RUN(unsigned int, 2); break; case 3: RUN(unsigned int, 3); break; default: RUN(unsigned int, -1); break; } } else { SimpleTensorInfo<unsigned long> tensorInfo(state, tensor); SimpleTensorInfo<unsigned long> indexInfo(state, index); RUN(unsigned long, -1); } if (oldTensor) { THCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor); THCudaTensor_free(state, tensor); tensor = oldTensor; } } #undef RUN
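The gather, scatter and scatterFill kernels in this pair all subtract 1 from the stored index value, because Torch7 exposed 1-based (Lua) indices. As a rough host-side mental model only (gather2d below is not part of cutorch), the gather along dim of a contiguous row-major 2-D tensor behaves like:

#include <vector>
#include <cassert>

// Reference gather along `dim` for a contiguous, row-major 2-D tensor with
// 1-based indices, mirroring the index arithmetic in THCudaTensor_gather:
//   dim == 0:  out[i][j] = src[idx[i][j] - 1][j]
//   dim == 1:  out[i][j] = src[i][idx[i][j] - 1]
std::vector<float> gather2d(const std::vector<float>& src, int src_rows, int src_cols,
                            const std::vector<float>& idx, int rows, int cols, int dim)
{
  assert(dim == 0 || dim == 1);
  // Sizes must agree on every dimension except `dim`.
  assert(dim == 0 ? src_cols == cols : src_rows == rows);
  std::vector<float> out(idx.size());
  for (int i = 0; i < rows; ++i) {
    for (int j = 0; j < cols; ++j) {
      int v = (int)idx[i * cols + j] - 1;  // Lua-style 1-based index -> 0-based
      out[i * cols + j] = (dim == 0) ? src[v * src_cols + j] : src[i * src_cols + v];
    }
  }
  return out;
}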
a33daa6380c92f80344f8e398ffbe76f1d0efeb1.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "freshman.h"
#include <omp.h>
#define N 300000
__global__ void kernel_1()
{
    double sum=0.0;
    for(int i=0;i<N;i++)
        sum=sum+tan(0.1)*tan(0.1);
}
__global__ void kernel_2()
{
    double sum=0.0;
    for(int i=0;i<N;i++)
        sum=sum+tan(0.1)*tan(0.1);
}
__global__ void kernel_3()
{
    double sum=0.0;
    for(int i=0;i<N;i++)
        sum=sum+tan(0.1)*tan(0.1);
}
__global__ void kernel_4()
{
    double sum=0.0;
    for(int i=0;i<N;i++)
        sum=sum+tan(0.1)*tan(0.1);
}
int main()
{
    int n_stream=4;
    hipStream_t *stream=(hipStream_t*)malloc(n_stream*sizeof(hipStream_t));
    for(int i=0;i<n_stream;i++)
    {
        hipStreamCreate(&stream[i]);
    }
    dim3 block(1);
    dim3 grid(1);
    hipEvent_t start,stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start,0);
    omp_set_num_threads(n_stream);
    #pragma omp parallel
    {
        int i=omp_get_thread_num();
        hipLaunchKernelGGL(( kernel_1), dim3(grid),dim3(block),0,stream[i], );
        hipLaunchKernelGGL(( kernel_2), dim3(grid),dim3(block),0,stream[i], );
        hipLaunchKernelGGL(( kernel_3), dim3(grid),dim3(block),0,stream[i], );
        hipLaunchKernelGGL(( kernel_4), dim3(grid),dim3(block),0,stream[i], );
    }
    hipEventRecord(stop,0);
    CHECK(hipEventSynchronize(stop));
    float elapsed_time;
    hipEventElapsedTime(&elapsed_time,start,stop);
    printf("elapsed time:%f ms\n",elapsed_time);
    for(int i=0;i<n_stream;i++)
    {
        hipStreamDestroy(stream[i]);
    }
    hipEventDestroy(start);
    hipEventDestroy(stop);
    free(stream);
    return 0;
}
a33daa6380c92f80344f8e398ffbe76f1d0efeb1.cu
#include <cuda_runtime.h>
#include <stdio.h>
#include "freshman.h"
#include <omp.h>
#define N 300000
__global__ void kernel_1()
{
    double sum=0.0;
    for(int i=0;i<N;i++)
        sum=sum+tan(0.1)*tan(0.1);
}
__global__ void kernel_2()
{
    double sum=0.0;
    for(int i=0;i<N;i++)
        sum=sum+tan(0.1)*tan(0.1);
}
__global__ void kernel_3()
{
    double sum=0.0;
    for(int i=0;i<N;i++)
        sum=sum+tan(0.1)*tan(0.1);
}
__global__ void kernel_4()
{
    double sum=0.0;
    for(int i=0;i<N;i++)
        sum=sum+tan(0.1)*tan(0.1);
}
int main()
{
    int n_stream=4;
    cudaStream_t *stream=(cudaStream_t*)malloc(n_stream*sizeof(cudaStream_t));
    for(int i=0;i<n_stream;i++)
    {
        cudaStreamCreate(&stream[i]);
    }
    dim3 block(1);
    dim3 grid(1);
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    omp_set_num_threads(n_stream);
    #pragma omp parallel
    {
        int i=omp_get_thread_num();
        kernel_1<<<grid,block,0,stream[i]>>>();
        kernel_2<<<grid,block,0,stream[i]>>>();
        kernel_3<<<grid,block,0,stream[i]>>>();
        kernel_4<<<grid,block,0,stream[i]>>>();
    }
    cudaEventRecord(stop,0);
    CHECK(cudaEventSynchronize(stop));
    float elapsed_time;
    cudaEventElapsedTime(&elapsed_time,start,stop);
    printf("elapsed time:%f ms\n",elapsed_time);
    for(int i=0;i<n_stream;i++)
    {
        cudaStreamDestroy(stream[i]);
    }
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(stream);
    return 0;
}
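The OpenMP region in this pair only parallelizes the act of enqueuing work; the overlap itself comes from the four streams, since kernel launches are asynchronous. A single-threaded sketch of the same dispatch is shown below; busy_kernel and the structure of main are illustrative assumptions, not code from the files above.

#include <cuda_runtime.h>
#include <math.h>

#define N 300000

// Same style of dummy workload as kernel_1..kernel_4 above (result unused on purpose).
__global__ void busy_kernel()
{
    double sum = 0.0;
    for (int i = 0; i < N; i++)
        sum = sum + tan(0.1) * tan(0.1);
}

int main()
{
    const int n_stream = 4;
    cudaStream_t stream[n_stream];
    for (int i = 0; i < n_stream; i++)
        cudaStreamCreate(&stream[i]);

    // One host thread is enough: launches are asynchronous, so all sixteen
    // kernels are enqueued before any of them finishes, and the four streams
    // can overlap on the device.
    for (int i = 0; i < n_stream; i++)
    {
        busy_kernel<<<1, 1, 0, stream[i]>>>();
        busy_kernel<<<1, 1, 0, stream[i]>>>();
        busy_kernel<<<1, 1, 0, stream[i]>>>();
        busy_kernel<<<1, 1, 0, stream[i]>>>();
    }
    cudaDeviceSynchronize();

    for (int i = 0; i < n_stream; i++)
        cudaStreamDestroy(stream[i]);
    return 0;
}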
86a49ada78686a3a6da8213c9698ec773656a0ff.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Rippling.h"

#include <iostream>
#include <assert.h>

#include "Device.h"

using std::cout;
using std::endl;

/*----------------------------------------------------------------------*\
|*			Declaration					*|
\*---------------------------------------------------------------------*/

/*--------------------------------------*\
|*		Imported			*|
\*-------------------------------------*/

extern __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t);

/*--------------------------------------*\
|*		Public				*|
\*-------------------------------------*/

/*--------------------------------------*\
|*		Private				*|
\*-------------------------------------*/

/*----------------------------------------------------------------------*\
|*			Implementation					*|
\*---------------------------------------------------------------------*/

/*--------------------------------------*\
|*		Public				*|
\*-------------------------------------*/

/*-------------------------*\
|*	Constructor	   *|
\*-------------------------*/

Rippling::Rippling(const Grid& grid, uint w, uint h, float dt) :
	Animable_I<uchar4>(grid, w, h, "Rippling_Cuda_RGBA_uchar4")
    {
    assert(w == h); // specific rippling

    // Inputs
    this->dt = dt;

    // Tools
    this->t = 0; // protected in Animable
    }

Rippling::~Rippling()
    {
    // nothing
    }

/*-------------------------*\
|*	Method		   *|
\*-------------------------*/

/**
 * Override
 * Called periodically by the API
 *
 * Note: domaineMath is not used because this animation is not zoomable
 */
void Rippling::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
    {
    Device::lastCudaError("rippling rgba uchar4 (before kernel)"); // optional, for debug only, remove for release

    // TODO: launch the kernel with <<<dg,db>>>
    // the kernel is imported above (line 19)
    hipLaunchKernelGGL(( rippling), dim3(dg), dim3(db), 0, 0, ptrDevPixels, w, h, t);

    Device::lastCudaError("rippling rgba uchar4 (after kernel)"); // optional, for debug only, remove for release
    }

/**
 * Override
 * Called periodically by the API
 */
void Rippling::animationStep()
    {
    t += dt;
    }

/*--------------------------------------*\
|*		Private				*|
\*-------------------------------------*/

/*----------------------------------------------------------------------*\
|*			End					*|
\*---------------------------------------------------------------------*/
86a49ada78686a3a6da8213c9698ec773656a0ff.cu
#include "Rippling.h" #include <iostream> #include <assert.h> #include "Device.h" using std::cout; using std::endl; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void rippling(uchar4* ptrDevPixels,uint w, uint h,float t); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*-------------------------*\ |* Constructeur *| \*-------------------------*/ Rippling::Rippling(const Grid& grid, uint w, uint h, float dt) : Animable_I<uchar4>(grid, w, h, "Rippling_Cuda_RGBA_uchar4") { assert(w == h); // specific rippling // Inputs this->dt = dt; // Tools this->t = 0; // protected dans Animable } Rippling::~Rippling() { // rien } /*-------------------------*\ |* Methode *| \*-------------------------*/ /** * Override * Call periodicly by the API * * Note : domaineMath pas use car pas zoomable */ void Rippling::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath) { Device::lastCudaError("rippling rgba uchar4 (before kernel)"); // facultatif, for debug only, remove for release // TODO lancer le kernel avec <<<dg,db>>> // le kernel est importer ci-dessus (ligne 19) rippling<<<db, dg>>>(ptrDevPixels, w , h, t); Device::lastCudaError("rippling rgba uchar4 (after kernel)"); // facultatif, for debug only, remove for release } /** * Override * Call periodicly by the API */ void Rippling::animationStep() { t += dt; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
064fddd7cb7edc0b33c0b4df70c52dcec84ba390.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * cg.cu * Created on: July 22, 2016 * Author: Wei Tan ([email protected]) * CUDA kernels related to batch CG solver used in ALS * CG solver: https://en.wikipedia.org/wiki/Conjugate_gradient_method * Code optimized for F = 100, and on cc 3.5, 3.7 platforms. Also tested in cc 5.2 */ #include "als.h" #include "device_utilities.h" #include "host_utilities.h" #include <fstream> #include <device_launch_parameters.h> #define SCAN_BATCH 24 #define CG_ERROR 1e-4 #undef DEBUG //CG (iterative solve) kernel //each block solves a A*x=b __global__ void updateXWithCGKernel(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x]; //sharedx[threadIdx.x] = 0; __syncthreads(); //r=b-A*x; float temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedx[i]; sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp; //p=r; sharedp[threadIdx.x] = sharedr[threadIdx.x]; //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = sharedr[threadIdx.x] *sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedp[i]; sharedap[threadIdx.x] = temp; #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = sharedp[threadIdx.x] *sharedap[threadIdx.x]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); //x=x+alpha*p; sharedx[threadIdx.x] = sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x]; //r=r-alpha*Ap; sharedr[threadIdx.x] = sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x]; //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; sharedp[threadIdx.x] = sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x]; } //CG (iterative solve) kernel //each block solves a A*x=b and A in fp16 __global__ void updateXWithCGKernel3(half * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x]; __syncthreads(); //r=b-A*x; float temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedx[i]; sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp; //p=r; sharedp[threadIdx.x] = sharedr[threadIdx.x]; //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = sharedr[threadIdx.x] *sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedp[i]; sharedap[threadIdx.x] = temp; #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = sharedp[threadIdx.x] *sharedap[threadIdx.x]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); //x=x+alpha*p; sharedx[threadIdx.x] = sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x]; //r=r-alpha*Ap; sharedr[threadIdx.x] = sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x]; //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; sharedp[threadIdx.x] = sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x]; } //blockDim.x=64 or 96 (two or three WARPs) instead of 100 -- WARP shuffle seems requiring this __global__ void updateXWithCGKernel2(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x for(int k = threadIdx.x; k < f; k += blockDim.x) sharedx[k] = x[blockIdx.x*f + k]; __syncthreads(); //r=b-A*x; float temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x){ temp = 0; for(int i = 0; i < f; i++) temp += A[blockIdx.x*f*f + f*i + k]*sharedx[i]; sharedr[k] = b[blockIdx.x*f + k] - temp; //p=r; sharedp[k] = sharedr[k]; } //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x){ temp += sharedr[k]*sharedr[k]; } blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
for(int k = threadIdx.x; k < f; k += blockDim.x){ temp = 0; for(int i = 0; i < f; i++) temp += A[blockIdx.x*f*f + f*i + k]*sharedp[i]; sharedap[k] = temp; } #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x) temp += sharedp[k]*sharedap[k]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); for(int k = threadIdx.x; k < f; k += blockDim.x){ //x=x+alpha*p; sharedx[k] = sharedx[k] + alpha[0] * sharedp[k]; //r=r-alpha*Ap; sharedr[k] = sharedr[k] - alpha[0] * sharedap[k]; } //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x) temp += sharedr[k]*sharedr[k]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); for(int k = threadIdx.x; k < f; k += blockDim.x) //p=r+(rsnew/rsold)*p; sharedp[k] = sharedr[k] + beta[0] * sharedp[k]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations for(int k = threadIdx.x; k < f; k += blockDim.x) //x<--sharedx x[blockIdx.x*f + k] = sharedx[k]; } void updateXWithCGHost_tt_fp16(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ hipLaunchKernelGGL(( updateXWithCGKernel3), dim3(batchSize), dim3(f), (4*f+4)*sizeof(float), 0, (half*)A, x, b, batchSize, f, cgIter); hipDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG printf("***A[0]:\n"); float *h_A = new float[f * f]; float *A_fp32; cudacall(hipMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0]))); hipLaunchKernelGGL(( fp16Array2fp32Array), dim3((f*f-1)/1024 + 1), dim3(1024), 0, 0, A_fp32, (half*)A, f*f); hipDeviceSynchronize(); cudaCheckError(); cudacall(hipMemcpy(h_A, A_fp32, f * f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f*f; i++) printf("%f ", h_A[i]); printf("\n"); delete [] h_A; cudacall(hipFree(A_fp32)); printf("***x[0]:\n"); float *h_x = new float[f]; cudacall(hipMemcpy(h_x, x, f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_x[i]); printf("\n"); delete [] h_x; /* printf("***b[0]:\n"); float *h_b = new float[f]; cudacall(hipMemcpy(h_b, b, f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_b[i]); printf("\n"); delete [] h_b; */ #endif } void updateXWithCGHost(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ hipLaunchKernelGGL(( updateXWithCGKernel), dim3(batchSize), dim3(f), (4*f+4)*sizeof(float), 0, //updateXWithCGKernel2, batchSize, 96, 4*f+4)*sizeof(float)>>> (A, x, b, batchSize, f, cgIter); hipDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG printf("***A[0]:\n"); float *h_A = new float[f * f]; float *A_fp32; cudacall(hipMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0]))); hipLaunchKernelGGL(( fp16Array2fp32Array), dim3((f*f-1)/1024 + 1), dim3(1024), 0, 0, A_fp32, (half*)A, f*f); hipDeviceSynchronize(); cudaCheckError(); cudacall(hipMemcpy(h_A, A_fp32, f * f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f*f; i++) printf("%f ", h_A[i]); printf("\n"); delete [] h_A; cudacall(hipFree(A_fp32)); printf("***x[0]:\n"); float *h_x = new float[f]; cudacall(hipMemcpy(h_x, x, f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_x[i]); printf("\n"); delete [] h_x; /* printf("***b[0]:\n"); float *h_b = new float[f]; cudacall(hipMemcpy(h_b, b, f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_b[i]); printf("\n"); delete [] h_b; */ #endif } //fused kernel, use thetaT to update XT __global__ void __launch_bounds__(64) alsUpdateFeature100(const int batch_offset, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* thetaT, float* XT, float* ythetaT, int cgIter) 
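/* Fused ALS update for F = 100: each block owns one row of XT. Phase 1 streams the thetaT
   columns referenced by this row (via csrColIndex) through shared memory in windows of
   SCAN_BATCH; each of the 55 active threads accumulates one 10x10 tile of the 100x100 symmetric
   normal-equation matrix in registers (temp0..temp99), covering one triangle of the 10x10 tile
   grid, with the opposite triangle recovered from symmetry inside the A*x and A*p products.
   Phase 2 adds the (end-start)*lambda regularization on the diagonal and solves the system with
   CG, reusing the same dynamic shared memory (thetaTemp) for the CG work vectors. */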
{ extern __shared__ float2 thetaTemp[]; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ float2 theta; //copy texture --> smem, and sync //two layers: warp divergence unless we split at 32 //require 32 >= SCAN_BATCH if(threadIdx.x < 2*32 ){ //int index = threadIdx.x; int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]); thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]); thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } __syncthreads(); //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9); } #endif //newly added CG phase //reuse the abundant shared memory float *sharedx = (float*)&thetaTemp[0]; float *sharedp = (float*)&thetaTemp[50]; float *sharedr = (float*)&thetaTemp[100]; float *sharedap = (float*)&thetaTemp[150]; float *sharedax = (float*)&thetaTemp[200]; 
float *rsold = (float*)&thetaTemp[250]; float *alpha = (float*)&thetaTemp[251]; float *rsnew = (float*)&thetaTemp[252]; float *beta = (float*)&thetaTemp[253]; //sharedx<--x for(int k = threadIdx.x; k < F; k += 64){ sharedx[k] = XT[blockIdx.x*F + k]; sharedax[k] = 0; } __syncthreads(); float temp = 0; //only uses 55 threads for A*p and A*x if(threadIdx.x < 55){ //add regularization if(tile_x==tile_y){ temp = (end - start) * lambda; temp0 += temp; temp11 += temp; temp22 += temp; temp33 += temp; temp44 += temp; temp55 += temp; temp66 += temp; temp77 += temp; temp88 += temp; temp99 += temp; } #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9); } #endif //r=b-A*x; //step1: ax=A*x atomicAdd(&sharedax[tile_y], temp0*sharedx[tile_x] + temp10*sharedx[tile_x+1] + temp20*sharedx[tile_x+2] + temp30*sharedx[tile_x+3] + temp40*sharedx[tile_x + 4] + temp50*sharedx[tile_x + 5] + temp60*sharedx[tile_x + 6] + temp70*sharedx[tile_x + 7] + temp80*sharedx[tile_x + 8] + temp90*sharedx[tile_x + 9]); atomicAdd(&sharedax[tile_y+1], temp1*sharedx[tile_x] + temp11*sharedx[tile_x+1] + temp21*sharedx[tile_x+2] + temp31*sharedx[tile_x+3] + temp41*sharedx[tile_x+4] + temp51*sharedx[tile_x+5] + temp61*sharedx[tile_x+6] + temp71*sharedx[tile_x+7] + temp81*sharedx[tile_x+8] + temp91*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+2], temp2*sharedx[tile_x] + temp12*sharedx[tile_x+1] + temp22*sharedx[tile_x+2] + temp32*sharedx[tile_x+3] + temp42*sharedx[tile_x+4] + temp52*sharedx[tile_x+5] + temp62*sharedx[tile_x+6] + temp72*sharedx[tile_x+7] + temp82*sharedx[tile_x+8] + temp92*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+3], temp3*sharedx[tile_x] + temp13*sharedx[tile_x+1] + temp23*sharedx[tile_x+2] + temp33*sharedx[tile_x+3] + temp43*sharedx[tile_x+4] + temp53*sharedx[tile_x+5] + temp63*sharedx[tile_x+6] + temp73*sharedx[tile_x+7] + temp83*sharedx[tile_x+8] + temp93*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+4], temp4*sharedx[tile_x] + temp14*sharedx[tile_x+1] + temp24*sharedx[tile_x+2] + temp34*sharedx[tile_x+3] + temp44*sharedx[tile_x+4] + temp54*sharedx[tile_x+5] + temp64*sharedx[tile_x+6] + temp74*sharedx[tile_x+7] + temp84*sharedx[tile_x+8] + temp94*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+5], temp5*sharedx[tile_x] + temp15*sharedx[tile_x+1] + temp25*sharedx[tile_x+2] + temp35*sharedx[tile_x+3] + temp45*sharedx[tile_x+4] + temp55*sharedx[tile_x+5] + temp65*sharedx[tile_x+6] + temp75*sharedx[tile_x+7] + temp85*sharedx[tile_x+8] + temp95*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+6], temp6*sharedx[tile_x] + temp16*sharedx[tile_x+1] + temp26*sharedx[tile_x+2] + temp36*sharedx[tile_x+3] + temp46*sharedx[tile_x+4] + temp56*sharedx[tile_x+5] + temp66*sharedx[tile_x+6] + temp76*sharedx[tile_x+7] + temp86*sharedx[tile_x+8] + temp96*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+7], temp7*sharedx[tile_x] + temp17*sharedx[tile_x+1] + temp27*sharedx[tile_x+2] + temp37*sharedx[tile_x+3] + temp47*sharedx[tile_x+4] + temp57*sharedx[tile_x+5] + temp67*sharedx[tile_x+6] + temp77*sharedx[tile_x+7] + temp87*sharedx[tile_x+8] + temp97*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+8], temp8*sharedx[tile_x] + temp18*sharedx[tile_x+1] + temp28*sharedx[tile_x+2] + temp38*sharedx[tile_x+3] + temp48*sharedx[tile_x+4] + temp58*sharedx[tile_x+5] + temp68*sharedx[tile_x+6] + temp78*sharedx[tile_x+7] + temp88*sharedx[tile_x+8] + temp98*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+9], temp9*sharedx[tile_x] + 
temp19*sharedx[tile_x+1] + temp29*sharedx[tile_x+2] + temp39*sharedx[tile_x+3] + temp49*sharedx[tile_x+4] + temp59*sharedx[tile_x+5] + temp69*sharedx[tile_x+6] + temp79*sharedx[tile_x+7] + temp89*sharedx[tile_x+8] + temp99*sharedx[tile_x+9]); if(tile_x!=tile_y){ atomicAdd(&sharedax[tile_x], temp0*sharedx[tile_y] + temp1*sharedx[tile_y + 1] + temp2*sharedx[tile_y + 2] + temp3*sharedx[tile_y + 3] + temp4*sharedx[tile_y + 4] + temp5*sharedx[tile_y + 5] + temp6*sharedx[tile_y + 6] + temp7*sharedx[tile_y + 7] + temp8*sharedx[tile_y + 8] + temp9*sharedx[tile_y + 9]); atomicAdd(&sharedax[tile_x+1], temp10*sharedx[tile_y] + temp11*sharedx[tile_y+1] + temp12*sharedx[tile_y+2] + temp13*sharedx[tile_y+3] + temp14*sharedx[tile_y+4] + temp15*sharedx[tile_y+5] + temp16*sharedx[tile_y+6] + temp17*sharedx[tile_y+7] + temp18*sharedx[tile_y+8] + temp19*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+2], temp20*sharedx[tile_y] + temp21*sharedx[tile_y+1] + temp22*sharedx[tile_y+2] + temp23*sharedx[tile_y+3] + temp24*sharedx[tile_y+4] + temp25*sharedx[tile_y+5] + temp26*sharedx[tile_y+6] + temp27*sharedx[tile_y+7] + temp28*sharedx[tile_y+8] + temp29*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+3], temp30*sharedx[tile_y] + temp31*sharedx[tile_y+1] + temp32*sharedx[tile_y+2] + temp33*sharedx[tile_y+3] + temp34*sharedx[tile_y+4] + temp35*sharedx[tile_y+5] + temp36*sharedx[tile_y+6] + temp37*sharedx[tile_y+7] + temp38*sharedx[tile_y+8] + temp39*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+4], temp40*sharedx[tile_y] + temp41*sharedx[tile_y+1] + temp42*sharedx[tile_y+2] + temp43*sharedx[tile_y+3] + temp44*sharedx[tile_y+4] + temp45*sharedx[tile_y+5] + temp46*sharedx[tile_y+6] + temp47*sharedx[tile_y+7] + temp48*sharedx[tile_y+8] + temp49*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+5], temp50*sharedx[tile_y] + temp51*sharedx[tile_y+1] + temp52*sharedx[tile_y+2] + temp53*sharedx[tile_y+3] + temp54*sharedx[tile_y+4] + temp55*sharedx[tile_y+5] + temp56*sharedx[tile_y+6] + temp57*sharedx[tile_y+7] + temp58*sharedx[tile_y+8] + temp59*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+6], temp60*sharedx[tile_y] + temp61*sharedx[tile_y+1] + temp62*sharedx[tile_y+2] + temp63*sharedx[tile_y+3] + temp64*sharedx[tile_y+4] + temp65*sharedx[tile_y+5] + temp66*sharedx[tile_y+6] + temp67*sharedx[tile_y+7] + temp68*sharedx[tile_y+8] + temp69*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+7], temp70*sharedx[tile_y] + temp71*sharedx[tile_y+1] + temp72*sharedx[tile_y+2] + temp73*sharedx[tile_y+3] + temp74*sharedx[tile_y+4] + temp75*sharedx[tile_y+5] + temp76*sharedx[tile_y+6] + temp77*sharedx[tile_y+7] + temp78*sharedx[tile_y+8] + temp79*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+8], temp80*sharedx[tile_y] + temp81*sharedx[tile_y+1] + temp82*sharedx[tile_y+2] + temp83*sharedx[tile_y+3] + temp84*sharedx[tile_y+4] + temp85*sharedx[tile_y+5] + temp86*sharedx[tile_y+6] + temp87*sharedx[tile_y+7] + temp88*sharedx[tile_y+8] + temp89*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+9], temp90*sharedx[tile_y] + temp91*sharedx[tile_y+1] + temp92*sharedx[tile_y+2] + temp93*sharedx[tile_y+3] + temp94*sharedx[tile_y+4] + temp95*sharedx[tile_y+5] + temp96*sharedx[tile_y+6] + temp97*sharedx[tile_y+7] + temp98*sharedx[tile_y+8] + temp99*sharedx[tile_y+9]); } } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***x:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedx[i]); printf("\n\n"); printf("***r=Ax:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedax[i]); printf("\n\n"); } #endif for(int k = threadIdx.x; k < 
F; k += 64){ //r=b-Ax sharedr[k] = ythetaT[blockIdx.x*blockDim.x + k] - sharedax[k]; //p=r; sharedp[k] = sharedr[k]; } //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } for(int k = threadIdx.x; k < F; k += 64){ temp += sharedr[k]*sharedr[k]; } blockReduceSumWithAtomics(rsold, temp); __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedx[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedax[i]); printf("\n\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif ///* //CG iterations for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; for(int k = threadIdx.x; k < F; k += 64) sharedap[k] = 0; __syncthreads(); //only uses 55 threads for A*p and A*x if(threadIdx.x < 55){ atomicAdd(&sharedap[tile_y], temp0*sharedp[tile_x] + temp10*sharedp[tile_x+1] + temp20*sharedp[tile_x+2] + temp30*sharedp[tile_x+3] + temp40*sharedp[tile_x + 4] + temp50*sharedp[tile_x + 5] + temp60*sharedp[tile_x + 6] + temp70*sharedp[tile_x + 7] + temp80*sharedp[tile_x + 8] + temp90*sharedp[tile_x + 9]); atomicAdd(&sharedap[tile_y+1], temp1*sharedp[tile_x] + temp11*sharedp[tile_x+1] + temp21*sharedp[tile_x+2] + temp31*sharedp[tile_x+3] + temp41*sharedp[tile_x+4] + temp51*sharedp[tile_x+5] + temp61*sharedp[tile_x+6] + temp71*sharedp[tile_x+7] + temp81*sharedp[tile_x+8] + temp91*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+2], temp2*sharedp[tile_x] + temp12*sharedp[tile_x+1] + temp22*sharedp[tile_x+2] + temp32*sharedp[tile_x+3] + temp42*sharedp[tile_x+4] + temp52*sharedp[tile_x+5] + temp62*sharedp[tile_x+6] + temp72*sharedp[tile_x+7] + temp82*sharedp[tile_x+8] + temp92*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+3], temp3*sharedp[tile_x] + temp13*sharedp[tile_x+1] + temp23*sharedp[tile_x+2] + temp33*sharedp[tile_x+3] + temp43*sharedp[tile_x+4] + temp53*sharedp[tile_x+5] + temp63*sharedp[tile_x+6] + temp73*sharedp[tile_x+7] + temp83*sharedp[tile_x+8] + temp93*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+4], temp4*sharedp[tile_x] + temp14*sharedp[tile_x+1] + temp24*sharedp[tile_x+2] + temp34*sharedp[tile_x+3] + temp44*sharedp[tile_x+4] + temp54*sharedp[tile_x+5] + temp64*sharedp[tile_x+6] + temp74*sharedp[tile_x+7] + temp84*sharedp[tile_x+8] + temp94*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+5], temp5*sharedp[tile_x] + temp15*sharedp[tile_x+1] + temp25*sharedp[tile_x+2] + temp35*sharedp[tile_x+3] + temp45*sharedp[tile_x+4] + temp55*sharedp[tile_x+5] + temp65*sharedp[tile_x+6] + temp75*sharedp[tile_x+7] + temp85*sharedp[tile_x+8] + temp95*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+6], temp6*sharedp[tile_x] + temp16*sharedp[tile_x+1] + temp26*sharedp[tile_x+2] + temp36*sharedp[tile_x+3] + temp46*sharedp[tile_x+4] + temp56*sharedp[tile_x+5] + temp66*sharedp[tile_x+6] + temp76*sharedp[tile_x+7] + temp86*sharedp[tile_x+8] + temp96*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+7], temp7*sharedp[tile_x] + temp17*sharedp[tile_x+1] + temp27*sharedp[tile_x+2] + temp37*sharedp[tile_x+3] + temp47*sharedp[tile_x+4] + temp57*sharedp[tile_x+5] + temp67*sharedp[tile_x+6] + temp77*sharedp[tile_x+7] + temp87*sharedp[tile_x+8] + temp97*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+8], temp8*sharedp[tile_x] + temp18*sharedp[tile_x+1] + temp28*sharedp[tile_x+2] + 
temp38*sharedp[tile_x+3] + temp48*sharedp[tile_x+4] + temp58*sharedp[tile_x+5] + temp68*sharedp[tile_x+6] + temp78*sharedp[tile_x+7] + temp88*sharedp[tile_x+8] + temp98*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+9], temp9*sharedp[tile_x] + temp19*sharedp[tile_x+1] + temp29*sharedp[tile_x+2] + temp39*sharedp[tile_x+3] + temp49*sharedp[tile_x+4] + temp59*sharedp[tile_x+5] + temp69*sharedp[tile_x+6] + temp79*sharedp[tile_x+7] + temp89*sharedp[tile_x+8] + temp99*sharedp[tile_x+9]); if(tile_x!=tile_y){ atomicAdd(&sharedap[tile_x], temp0*sharedp[tile_y] + temp1*sharedp[tile_y + 1] + temp2*sharedp[tile_y + 2] + temp3*sharedp[tile_y + 3] + temp4*sharedp[tile_y + 4] + temp5*sharedp[tile_y + 5] + temp6*sharedp[tile_y + 6] + temp7*sharedp[tile_y + 7] + temp8*sharedp[tile_y + 8] + temp9*sharedp[tile_y + 9]); atomicAdd(&sharedap[tile_x+1], temp10*sharedp[tile_y] + temp11*sharedp[tile_y+1] + temp12*sharedp[tile_y+2] + temp13*sharedp[tile_y+3] + temp14*sharedp[tile_y+4] + temp15*sharedp[tile_y+5] + temp16*sharedp[tile_y+6] + temp17*sharedp[tile_y+7] + temp18*sharedp[tile_y+8] + temp19*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+2], temp20*sharedp[tile_y] + temp21*sharedp[tile_y+1] + temp22*sharedp[tile_y+2] + temp23*sharedp[tile_y+3] + temp24*sharedp[tile_y+4] + temp25*sharedp[tile_y+5] + temp26*sharedp[tile_y+6] + temp27*sharedp[tile_y+7] + temp28*sharedp[tile_y+8] + temp29*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+3], temp30*sharedp[tile_y] + temp31*sharedp[tile_y+1] + temp32*sharedp[tile_y+2] + temp33*sharedp[tile_y+3] + temp34*sharedp[tile_y+4] + temp35*sharedp[tile_y+5] + temp36*sharedp[tile_y+6] + temp37*sharedp[tile_y+7] + temp38*sharedp[tile_y+8] + temp39*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+4], temp40*sharedp[tile_y] + temp41*sharedp[tile_y+1] + temp42*sharedp[tile_y+2] + temp43*sharedp[tile_y+3] + temp44*sharedp[tile_y+4] + temp45*sharedp[tile_y+5] + temp46*sharedp[tile_y+6] + temp47*sharedp[tile_y+7] + temp48*sharedp[tile_y+8] + temp49*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+5], temp50*sharedp[tile_y] + temp51*sharedp[tile_y+1] + temp52*sharedp[tile_y+2] + temp53*sharedp[tile_y+3] + temp54*sharedp[tile_y+4] + temp55*sharedp[tile_y+5] + temp56*sharedp[tile_y+6] + temp57*sharedp[tile_y+7] + temp58*sharedp[tile_y+8] + temp59*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+6], temp60*sharedp[tile_y] + temp61*sharedp[tile_y+1] + temp62*sharedp[tile_y+2] + temp63*sharedp[tile_y+3] + temp64*sharedp[tile_y+4] + temp65*sharedp[tile_y+5] + temp66*sharedp[tile_y+6] + temp67*sharedp[tile_y+7] + temp68*sharedp[tile_y+8] + temp69*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+7], temp70*sharedp[tile_y] + temp71*sharedp[tile_y+1] + temp72*sharedp[tile_y+2] + temp73*sharedp[tile_y+3] + temp74*sharedp[tile_y+4] + temp75*sharedp[tile_y+5] + temp76*sharedp[tile_y+6] + temp77*sharedp[tile_y+7] + temp78*sharedp[tile_y+8] + temp79*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+8], temp80*sharedp[tile_y] + temp81*sharedp[tile_y+1] + temp82*sharedp[tile_y+2] + temp83*sharedp[tile_y+3] + temp84*sharedp[tile_y+4] + temp85*sharedp[tile_y+5] + temp86*sharedp[tile_y+6] + temp87*sharedp[tile_y+7] + temp88*sharedp[tile_y+8] + temp89*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+9], temp90*sharedp[tile_y] + temp91*sharedp[tile_y+1] + temp92*sharedp[tile_y+2] + temp93*sharedp[tile_y+3] + temp94*sharedp[tile_y+4] + temp95*sharedp[tile_y+5] + temp96*sharedp[tile_y+6] + temp97*sharedp[tile_y+7] + temp98*sharedp[tile_y+8] + temp99*sharedp[tile_y+9]); } } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 
&& threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = 0; for(int k = threadIdx.x; k < F; k += 64) temp += sharedp[k]*sharedap[k]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG if(blockIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); } #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); for(int k = threadIdx.x; k < F; k += 64){ //x=x+alpha*p; sharedx[k] = sharedx[k] + alpha[0] * sharedp[k]; //r=r-alpha*Ap; sharedr[k] = sharedr[k] - alpha[0] * sharedap[k]; //NOT needed? //__syncthreads(); } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; temp = 0; for(int k = threadIdx.x; k < F; k += 64) temp += sharedr[k]*sharedr[k]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; for(int k = threadIdx.x; k < F; k += 64) sharedp[k] = sharedr[k] + beta[0] * sharedp[k]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(blockIdx.x==0 && threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx for(int k = threadIdx.x; k < F; k += 64) XT[blockIdx.x*F + k] = sharedx[k]; //*/ } } void alsUpdateFeature100Host(const int batch_offset, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* __restrict__ thetaT, float* XT, float* ythetaT, int cgIter){ hipLaunchKernelGGL(( alsUpdateFeature100), dim3(m), dim3(64), SCAN_BATCH * F/2*sizeof(float2), 0, batch_offset, csrRowIndex, csrColIndex, lambda, m, F, thetaT, XT, ythetaT, cgIter); hipDeviceSynchronize(); cudaCheckError(); }
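/* Illustrative usage sketch (not part of the original sources): one way a caller might size and
   drive the batched CG wrapper above. The function and variable names here are hypothetical;
   only the array extents (A: batchSize*f*f, x and b: batchSize*f) and the call signature follow
   from updateXWithCGHost. Kept under #if 0 so it is never compiled. */
#if 0
void exampleBatchedCGSolve(const int batchSize, const int f, const int cgIter){
	float *d_A, *d_x, *d_b;
	cudacall(hipMalloc((void** ) &d_A, (size_t)batchSize * f * f * sizeof(float))); //batch of f x f SPD matrices
	cudacall(hipMalloc((void** ) &d_x, (size_t)batchSize * f * sizeof(float)));     //initial guesses, overwritten with solutions
	cudacall(hipMalloc((void** ) &d_b, (size_t)batchSize * f * sizeof(float)));     //right-hand sides
	//... fill d_A, d_x, d_b from the application (omitted) ...
	//one block of f threads per system, cgIter CG steps each
	updateXWithCGHost(d_A, d_x, d_b, batchSize, f, cgIter);
	cudacall(hipFree(d_A)); cudacall(hipFree(d_x)); cudacall(hipFree(d_b));
}
#endif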
064fddd7cb7edc0b33c0b4df70c52dcec84ba390.cu
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * cg.cu * Created on: July 22, 2016 * Author: Wei Tan ([email protected]) * CUDA kernels related to batch CG solver used in ALS * CG solver: https://en.wikipedia.org/wiki/Conjugate_gradient_method * Code optimized for F = 100, and on cc 3.5, 3.7 platforms. Also tested in cc 5.2 */ #include "als.h" #include "device_utilities.h" #include "host_utilities.h" #include <fstream> #include <device_launch_parameters.h> #define SCAN_BATCH 24 #define CG_ERROR 1e-4 #undef DEBUG //CG (iterative solve) kernel //each block solves a A*x=b __global__ void updateXWithCGKernel(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x]; //sharedx[threadIdx.x] = 0; __syncthreads(); //r=b-A*x; float temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedx[i]; sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp; //p=r; sharedp[threadIdx.x] = sharedr[threadIdx.x]; //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = sharedr[threadIdx.x] *sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
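/* One CG iteration as implemented below: ap = A*p; pAp = p.ap via the block-wide atomic
   reduction into rsnew; alpha = rsold/pAp; x += alpha*p and r -= alpha*ap; rsnew = r.r; stop
   early once rsnew (the squared residual norm) drops below CG_ERROR; otherwise compute beta and
   update the search direction p. rsnew is deliberately reused, first for pAp and then for r.r. */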
temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedp[i]; sharedap[threadIdx.x] = temp; #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = sharedp[threadIdx.x] *sharedap[threadIdx.x]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); //x=x+alpha*p; sharedx[threadIdx.x] = sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x]; //r=r-alpha*Ap; sharedr[threadIdx.x] = sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x]; //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; sharedp[threadIdx.x] = sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x]; } //CG (iterative solve) kernel //each block solves a A*x=b and A in fp16 __global__ void updateXWithCGKernel3(half * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x]; __syncthreads(); //r=b-A*x; float temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedx[i]; sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp; //p=r; sharedp[threadIdx.x] = sharedr[threadIdx.x]; //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = sharedr[threadIdx.x] *sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
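/* In this fp16 variant only the matrix A is stored in half precision; each element is widened
   with __half2float on the fly, while all CG arithmetic (x, b, and the shared-memory vectors
   p, r, ap) stays in fp32. */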
temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedp[i]; sharedap[threadIdx.x] = temp; #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = sharedp[threadIdx.x] *sharedap[threadIdx.x]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); //x=x+alpha*p; sharedx[threadIdx.x] = sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x]; //r=r-alpha*Ap; sharedr[threadIdx.x] = sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x]; //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; sharedp[threadIdx.x] = sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x]; } //blockDim.x=64 or 96 (two or three WARPs) instead of 100 -- WARP shuffle seems requiring this __global__ void updateXWithCGKernel2(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x for(int k = threadIdx.x; k < f; k += blockDim.x) sharedx[k] = x[blockIdx.x*f + k]; __syncthreads(); //r=b-A*x; float temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x){ temp = 0; for(int i = 0; i < f; i++) temp += A[blockIdx.x*f*f + f*i + k]*sharedx[i]; sharedr[k] = b[blockIdx.x*f + k] - temp; //p=r; sharedp[k] = sharedr[k]; } //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x){ temp += sharedr[k]*sharedr[k]; } blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
for(int k = threadIdx.x; k < f; k += blockDim.x){ temp = 0; for(int i = 0; i < f; i++) temp += A[blockIdx.x*f*f + f*i + k]*sharedp[i]; sharedap[k] = temp; } #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x) temp += sharedp[k]*sharedap[k]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); for(int k = threadIdx.x; k < f; k += blockDim.x){ //x=x+alpha*p; sharedx[k] = sharedx[k] + alpha[0] * sharedp[k]; //r=r-alpha*Ap; sharedr[k] = sharedr[k] - alpha[0] * sharedap[k]; } //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x) temp += sharedr[k]*sharedr[k]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); for(int k = threadIdx.x; k < f; k += blockDim.x) //p=r+(rsnew/rsold)*p; sharedp[k] = sharedr[k] + beta[0] * sharedp[k]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations for(int k = threadIdx.x; k < f; k += blockDim.x) //x<--sharedx x[blockIdx.x*f + k] = sharedx[k]; } void updateXWithCGHost_tt_fp16(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ updateXWithCGKernel3<<<batchSize, f, (4*f+4)*sizeof(float)>>> ((half*)A, x, b, batchSize, f, cgIter); cudaDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG printf("***A[0]:\n"); float *h_A = new float[f * f]; float *A_fp32; cudacall(cudaMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0]))); fp16Array2fp32Array<<<(f*f-1)/1024 + 1, 1024>>>(A_fp32, (half*)A, f*f); cudaDeviceSynchronize(); cudaCheckError(); cudacall(cudaMemcpy(h_A, A_fp32, f * f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f*f; i++) printf("%f ", h_A[i]); printf("\n"); delete [] h_A; cudacall(cudaFree(A_fp32)); printf("***x[0]:\n"); float *h_x = new float[f]; cudacall(cudaMemcpy(h_x, x, f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_x[i]); printf("\n"); delete [] h_x; /* printf("***b[0]:\n"); float *h_b = new float[f]; cudacall(cudaMemcpy(h_b, b, f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_b[i]); printf("\n"); delete [] h_b; */ #endif } void updateXWithCGHost(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ updateXWithCGKernel<<<batchSize, f, (4*f+4)*sizeof(float)>>> //updateXWithCGKernel2<<<batchSize, 96, (4*f+4)*sizeof(float)>>> (A, x, b, batchSize, f, cgIter); cudaDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG printf("***A[0]:\n"); float *h_A = new float[f * f]; float *A_fp32; cudacall(cudaMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0]))); fp16Array2fp32Array<<<(f*f-1)/1024 + 1, 1024>>>(A_fp32, (half*)A, f*f); cudaDeviceSynchronize(); cudaCheckError(); cudacall(cudaMemcpy(h_A, A_fp32, f * f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f*f; i++) printf("%f ", h_A[i]); printf("\n"); delete [] h_A; cudacall(cudaFree(A_fp32)); printf("***x[0]:\n"); float *h_x = new float[f]; cudacall(cudaMemcpy(h_x, x, f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_x[i]); printf("\n"); delete [] h_x; /* printf("***b[0]:\n"); float *h_b = new float[f]; cudacall(cudaMemcpy(h_b, b, f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_b[i]); printf("\n"); delete [] h_b; */ #endif } //fused kernel, use thetaT to update XT __global__ void __launch_bounds__(64) alsUpdateFeature100(const int batch_offset, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* thetaT, float* XT, float* ythetaT, int cgIter) { extern __shared__ float2 thetaTemp[]; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to 
handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ float2 theta; //copy texture --> smem, and sync //two layers: warp divergence unless we split at 32 //require 32 >= SCAN_BATCH if(threadIdx.x < 2*32 ){ //int index = threadIdx.x; int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]); thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]); thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } __syncthreads(); //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9); } #endif //newly added CG phase //reuse the abundant shared memory float *sharedx = (float*)&thetaTemp[0]; float *sharedp = (float*)&thetaTemp[50]; float *sharedr = (float*)&thetaTemp[100]; float *sharedap = (float*)&thetaTemp[150]; float *sharedax = (float*)&thetaTemp[200]; float *rsold = (float*)&thetaTemp[250]; float *alpha = (float*)&thetaTemp[251]; float *rsnew = 
(float*)&thetaTemp[252]; float *beta = (float*)&thetaTemp[253]; //sharedx<--x for(int k = threadIdx.x; k < F; k += 64){ sharedx[k] = XT[blockIdx.x*F + k]; sharedax[k] = 0; } __syncthreads(); float temp = 0; //only uses 55 threads for A*p and A*x if(threadIdx.x < 55){ //add regularization if(tile_x==tile_y){ temp = (end - start) * lambda; temp0 += temp; temp11 += temp; temp22 += temp; temp33 += temp; temp44 += temp; temp55 += temp; temp66 += temp; temp77 += temp; temp88 += temp; temp99 += temp; } #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9); } #endif //r=b-A*x; //step1: ax=A*x atomicAdd(&sharedax[tile_y], temp0*sharedx[tile_x] + temp10*sharedx[tile_x+1] + temp20*sharedx[tile_x+2] + temp30*sharedx[tile_x+3] + temp40*sharedx[tile_x + 4] + temp50*sharedx[tile_x + 5] + temp60*sharedx[tile_x + 6] + temp70*sharedx[tile_x + 7] + temp80*sharedx[tile_x + 8] + temp90*sharedx[tile_x + 9]); atomicAdd(&sharedax[tile_y+1], temp1*sharedx[tile_x] + temp11*sharedx[tile_x+1] + temp21*sharedx[tile_x+2] + temp31*sharedx[tile_x+3] + temp41*sharedx[tile_x+4] + temp51*sharedx[tile_x+5] + temp61*sharedx[tile_x+6] + temp71*sharedx[tile_x+7] + temp81*sharedx[tile_x+8] + temp91*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+2], temp2*sharedx[tile_x] + temp12*sharedx[tile_x+1] + temp22*sharedx[tile_x+2] + temp32*sharedx[tile_x+3] + temp42*sharedx[tile_x+4] + temp52*sharedx[tile_x+5] + temp62*sharedx[tile_x+6] + temp72*sharedx[tile_x+7] + temp82*sharedx[tile_x+8] + temp92*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+3], temp3*sharedx[tile_x] + temp13*sharedx[tile_x+1] + temp23*sharedx[tile_x+2] + temp33*sharedx[tile_x+3] + temp43*sharedx[tile_x+4] + temp53*sharedx[tile_x+5] + temp63*sharedx[tile_x+6] + temp73*sharedx[tile_x+7] + temp83*sharedx[tile_x+8] + temp93*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+4], temp4*sharedx[tile_x] + temp14*sharedx[tile_x+1] + temp24*sharedx[tile_x+2] + temp34*sharedx[tile_x+3] + temp44*sharedx[tile_x+4] + temp54*sharedx[tile_x+5] + temp64*sharedx[tile_x+6] + temp74*sharedx[tile_x+7] + temp84*sharedx[tile_x+8] + temp94*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+5], temp5*sharedx[tile_x] + temp15*sharedx[tile_x+1] + temp25*sharedx[tile_x+2] + temp35*sharedx[tile_x+3] + temp45*sharedx[tile_x+4] + temp55*sharedx[tile_x+5] + temp65*sharedx[tile_x+6] + temp75*sharedx[tile_x+7] + temp85*sharedx[tile_x+8] + temp95*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+6], temp6*sharedx[tile_x] + temp16*sharedx[tile_x+1] + temp26*sharedx[tile_x+2] + temp36*sharedx[tile_x+3] + temp46*sharedx[tile_x+4] + temp56*sharedx[tile_x+5] + temp66*sharedx[tile_x+6] + temp76*sharedx[tile_x+7] + temp86*sharedx[tile_x+8] + temp96*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+7], temp7*sharedx[tile_x] + temp17*sharedx[tile_x+1] + temp27*sharedx[tile_x+2] + temp37*sharedx[tile_x+3] + temp47*sharedx[tile_x+4] + temp57*sharedx[tile_x+5] + temp67*sharedx[tile_x+6] + temp77*sharedx[tile_x+7] + temp87*sharedx[tile_x+8] + temp97*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+8], temp8*sharedx[tile_x] + temp18*sharedx[tile_x+1] + temp28*sharedx[tile_x+2] + temp38*sharedx[tile_x+3] + temp48*sharedx[tile_x+4] + temp58*sharedx[tile_x+5] + temp68*sharedx[tile_x+6] + temp78*sharedx[tile_x+7] + temp88*sharedx[tile_x+8] + temp98*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+9], temp9*sharedx[tile_x] + temp19*sharedx[tile_x+1] + temp29*sharedx[tile_x+2] + temp39*sharedx[tile_x+3] + 
temp49*sharedx[tile_x+4] + temp59*sharedx[tile_x+5] + temp69*sharedx[tile_x+6] + temp79*sharedx[tile_x+7] + temp89*sharedx[tile_x+8] + temp99*sharedx[tile_x+9]); if(tile_x!=tile_y){ atomicAdd(&sharedax[tile_x], temp0*sharedx[tile_y] + temp1*sharedx[tile_y + 1] + temp2*sharedx[tile_y + 2] + temp3*sharedx[tile_y + 3] + temp4*sharedx[tile_y + 4] + temp5*sharedx[tile_y + 5] + temp6*sharedx[tile_y + 6] + temp7*sharedx[tile_y + 7] + temp8*sharedx[tile_y + 8] + temp9*sharedx[tile_y + 9]); atomicAdd(&sharedax[tile_x+1], temp10*sharedx[tile_y] + temp11*sharedx[tile_y+1] + temp12*sharedx[tile_y+2] + temp13*sharedx[tile_y+3] + temp14*sharedx[tile_y+4] + temp15*sharedx[tile_y+5] + temp16*sharedx[tile_y+6] + temp17*sharedx[tile_y+7] + temp18*sharedx[tile_y+8] + temp19*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+2], temp20*sharedx[tile_y] + temp21*sharedx[tile_y+1] + temp22*sharedx[tile_y+2] + temp23*sharedx[tile_y+3] + temp24*sharedx[tile_y+4] + temp25*sharedx[tile_y+5] + temp26*sharedx[tile_y+6] + temp27*sharedx[tile_y+7] + temp28*sharedx[tile_y+8] + temp29*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+3], temp30*sharedx[tile_y] + temp31*sharedx[tile_y+1] + temp32*sharedx[tile_y+2] + temp33*sharedx[tile_y+3] + temp34*sharedx[tile_y+4] + temp35*sharedx[tile_y+5] + temp36*sharedx[tile_y+6] + temp37*sharedx[tile_y+7] + temp38*sharedx[tile_y+8] + temp39*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+4], temp40*sharedx[tile_y] + temp41*sharedx[tile_y+1] + temp42*sharedx[tile_y+2] + temp43*sharedx[tile_y+3] + temp44*sharedx[tile_y+4] + temp45*sharedx[tile_y+5] + temp46*sharedx[tile_y+6] + temp47*sharedx[tile_y+7] + temp48*sharedx[tile_y+8] + temp49*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+5], temp50*sharedx[tile_y] + temp51*sharedx[tile_y+1] + temp52*sharedx[tile_y+2] + temp53*sharedx[tile_y+3] + temp54*sharedx[tile_y+4] + temp55*sharedx[tile_y+5] + temp56*sharedx[tile_y+6] + temp57*sharedx[tile_y+7] + temp58*sharedx[tile_y+8] + temp59*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+6], temp60*sharedx[tile_y] + temp61*sharedx[tile_y+1] + temp62*sharedx[tile_y+2] + temp63*sharedx[tile_y+3] + temp64*sharedx[tile_y+4] + temp65*sharedx[tile_y+5] + temp66*sharedx[tile_y+6] + temp67*sharedx[tile_y+7] + temp68*sharedx[tile_y+8] + temp69*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+7], temp70*sharedx[tile_y] + temp71*sharedx[tile_y+1] + temp72*sharedx[tile_y+2] + temp73*sharedx[tile_y+3] + temp74*sharedx[tile_y+4] + temp75*sharedx[tile_y+5] + temp76*sharedx[tile_y+6] + temp77*sharedx[tile_y+7] + temp78*sharedx[tile_y+8] + temp79*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+8], temp80*sharedx[tile_y] + temp81*sharedx[tile_y+1] + temp82*sharedx[tile_y+2] + temp83*sharedx[tile_y+3] + temp84*sharedx[tile_y+4] + temp85*sharedx[tile_y+5] + temp86*sharedx[tile_y+6] + temp87*sharedx[tile_y+7] + temp88*sharedx[tile_y+8] + temp89*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+9], temp90*sharedx[tile_y] + temp91*sharedx[tile_y+1] + temp92*sharedx[tile_y+2] + temp93*sharedx[tile_y+3] + temp94*sharedx[tile_y+4] + temp95*sharedx[tile_y+5] + temp96*sharedx[tile_y+6] + temp97*sharedx[tile_y+7] + temp98*sharedx[tile_y+8] + temp99*sharedx[tile_y+9]); } } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***x:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedx[i]); printf("\n\n"); printf("***r=Ax:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedax[i]); printf("\n\n"); } #endif for(int k = threadIdx.x; k < F; k += 64){ //r=b-Ax sharedr[k] = ythetaT[blockIdx.x*blockDim.x + k] - 
sharedax[k]; //p=r; sharedp[k] = sharedr[k]; } //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } for(int k = threadIdx.x; k < F; k += 64){ temp += sharedr[k]*sharedr[k]; } blockReduceSumWithAtomics(rsold, temp); __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedx[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedax[i]); printf("\n\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif ///* //CG iterations for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; for(int k = threadIdx.x; k < F; k += 64) sharedap[k] = 0; __syncthreads(); //only uses 55 threads for A*p and A*x if(threadIdx.x < 55){ atomicAdd(&sharedap[tile_y], temp0*sharedp[tile_x] + temp10*sharedp[tile_x+1] + temp20*sharedp[tile_x+2] + temp30*sharedp[tile_x+3] + temp40*sharedp[tile_x + 4] + temp50*sharedp[tile_x + 5] + temp60*sharedp[tile_x + 6] + temp70*sharedp[tile_x + 7] + temp80*sharedp[tile_x + 8] + temp90*sharedp[tile_x + 9]); atomicAdd(&sharedap[tile_y+1], temp1*sharedp[tile_x] + temp11*sharedp[tile_x+1] + temp21*sharedp[tile_x+2] + temp31*sharedp[tile_x+3] + temp41*sharedp[tile_x+4] + temp51*sharedp[tile_x+5] + temp61*sharedp[tile_x+6] + temp71*sharedp[tile_x+7] + temp81*sharedp[tile_x+8] + temp91*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+2], temp2*sharedp[tile_x] + temp12*sharedp[tile_x+1] + temp22*sharedp[tile_x+2] + temp32*sharedp[tile_x+3] + temp42*sharedp[tile_x+4] + temp52*sharedp[tile_x+5] + temp62*sharedp[tile_x+6] + temp72*sharedp[tile_x+7] + temp82*sharedp[tile_x+8] + temp92*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+3], temp3*sharedp[tile_x] + temp13*sharedp[tile_x+1] + temp23*sharedp[tile_x+2] + temp33*sharedp[tile_x+3] + temp43*sharedp[tile_x+4] + temp53*sharedp[tile_x+5] + temp63*sharedp[tile_x+6] + temp73*sharedp[tile_x+7] + temp83*sharedp[tile_x+8] + temp93*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+4], temp4*sharedp[tile_x] + temp14*sharedp[tile_x+1] + temp24*sharedp[tile_x+2] + temp34*sharedp[tile_x+3] + temp44*sharedp[tile_x+4] + temp54*sharedp[tile_x+5] + temp64*sharedp[tile_x+6] + temp74*sharedp[tile_x+7] + temp84*sharedp[tile_x+8] + temp94*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+5], temp5*sharedp[tile_x] + temp15*sharedp[tile_x+1] + temp25*sharedp[tile_x+2] + temp35*sharedp[tile_x+3] + temp45*sharedp[tile_x+4] + temp55*sharedp[tile_x+5] + temp65*sharedp[tile_x+6] + temp75*sharedp[tile_x+7] + temp85*sharedp[tile_x+8] + temp95*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+6], temp6*sharedp[tile_x] + temp16*sharedp[tile_x+1] + temp26*sharedp[tile_x+2] + temp36*sharedp[tile_x+3] + temp46*sharedp[tile_x+4] + temp56*sharedp[tile_x+5] + temp66*sharedp[tile_x+6] + temp76*sharedp[tile_x+7] + temp86*sharedp[tile_x+8] + temp96*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+7], temp7*sharedp[tile_x] + temp17*sharedp[tile_x+1] + temp27*sharedp[tile_x+2] + temp37*sharedp[tile_x+3] + temp47*sharedp[tile_x+4] + temp57*sharedp[tile_x+5] + temp67*sharedp[tile_x+6] + temp77*sharedp[tile_x+7] + temp87*sharedp[tile_x+8] + temp97*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+8], temp8*sharedp[tile_x] + temp18*sharedp[tile_x+1] + temp28*sharedp[tile_x+2] + temp38*sharedp[tile_x+3] + temp48*sharedp[tile_x+4] + 
temp58*sharedp[tile_x+5] + temp68*sharedp[tile_x+6] + temp78*sharedp[tile_x+7] + temp88*sharedp[tile_x+8] + temp98*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+9], temp9*sharedp[tile_x] + temp19*sharedp[tile_x+1] + temp29*sharedp[tile_x+2] + temp39*sharedp[tile_x+3] + temp49*sharedp[tile_x+4] + temp59*sharedp[tile_x+5] + temp69*sharedp[tile_x+6] + temp79*sharedp[tile_x+7] + temp89*sharedp[tile_x+8] + temp99*sharedp[tile_x+9]); if(tile_x!=tile_y){ atomicAdd(&sharedap[tile_x], temp0*sharedp[tile_y] + temp1*sharedp[tile_y + 1] + temp2*sharedp[tile_y + 2] + temp3*sharedp[tile_y + 3] + temp4*sharedp[tile_y + 4] + temp5*sharedp[tile_y + 5] + temp6*sharedp[tile_y + 6] + temp7*sharedp[tile_y + 7] + temp8*sharedp[tile_y + 8] + temp9*sharedp[tile_y + 9]); atomicAdd(&sharedap[tile_x+1], temp10*sharedp[tile_y] + temp11*sharedp[tile_y+1] + temp12*sharedp[tile_y+2] + temp13*sharedp[tile_y+3] + temp14*sharedp[tile_y+4] + temp15*sharedp[tile_y+5] + temp16*sharedp[tile_y+6] + temp17*sharedp[tile_y+7] + temp18*sharedp[tile_y+8] + temp19*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+2], temp20*sharedp[tile_y] + temp21*sharedp[tile_y+1] + temp22*sharedp[tile_y+2] + temp23*sharedp[tile_y+3] + temp24*sharedp[tile_y+4] + temp25*sharedp[tile_y+5] + temp26*sharedp[tile_y+6] + temp27*sharedp[tile_y+7] + temp28*sharedp[tile_y+8] + temp29*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+3], temp30*sharedp[tile_y] + temp31*sharedp[tile_y+1] + temp32*sharedp[tile_y+2] + temp33*sharedp[tile_y+3] + temp34*sharedp[tile_y+4] + temp35*sharedp[tile_y+5] + temp36*sharedp[tile_y+6] + temp37*sharedp[tile_y+7] + temp38*sharedp[tile_y+8] + temp39*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+4], temp40*sharedp[tile_y] + temp41*sharedp[tile_y+1] + temp42*sharedp[tile_y+2] + temp43*sharedp[tile_y+3] + temp44*sharedp[tile_y+4] + temp45*sharedp[tile_y+5] + temp46*sharedp[tile_y+6] + temp47*sharedp[tile_y+7] + temp48*sharedp[tile_y+8] + temp49*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+5], temp50*sharedp[tile_y] + temp51*sharedp[tile_y+1] + temp52*sharedp[tile_y+2] + temp53*sharedp[tile_y+3] + temp54*sharedp[tile_y+4] + temp55*sharedp[tile_y+5] + temp56*sharedp[tile_y+6] + temp57*sharedp[tile_y+7] + temp58*sharedp[tile_y+8] + temp59*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+6], temp60*sharedp[tile_y] + temp61*sharedp[tile_y+1] + temp62*sharedp[tile_y+2] + temp63*sharedp[tile_y+3] + temp64*sharedp[tile_y+4] + temp65*sharedp[tile_y+5] + temp66*sharedp[tile_y+6] + temp67*sharedp[tile_y+7] + temp68*sharedp[tile_y+8] + temp69*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+7], temp70*sharedp[tile_y] + temp71*sharedp[tile_y+1] + temp72*sharedp[tile_y+2] + temp73*sharedp[tile_y+3] + temp74*sharedp[tile_y+4] + temp75*sharedp[tile_y+5] + temp76*sharedp[tile_y+6] + temp77*sharedp[tile_y+7] + temp78*sharedp[tile_y+8] + temp79*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+8], temp80*sharedp[tile_y] + temp81*sharedp[tile_y+1] + temp82*sharedp[tile_y+2] + temp83*sharedp[tile_y+3] + temp84*sharedp[tile_y+4] + temp85*sharedp[tile_y+5] + temp86*sharedp[tile_y+6] + temp87*sharedp[tile_y+7] + temp88*sharedp[tile_y+8] + temp89*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+9], temp90*sharedp[tile_y] + temp91*sharedp[tile_y+1] + temp92*sharedp[tile_y+2] + temp93*sharedp[tile_y+3] + temp94*sharedp[tile_y+4] + temp95*sharedp[tile_y+5] + temp96*sharedp[tile_y+6] + temp97*sharedp[tile_y+7] + temp98*sharedp[tile_y+8] + temp99*sharedp[tile_y+9]); } } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("----------CG iteration %d 
\n", iter); printf("***ap:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = 0; for(int k = threadIdx.x; k < F; k += 64) temp += sharedp[k]*sharedap[k]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG if(blockIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); } #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); for(int k = threadIdx.x; k < F; k += 64){ //x=x+alpha*p; sharedx[k] = sharedx[k] + alpha[0] * sharedp[k]; //r=r-alpha*Ap; sharedr[k] = sharedr[k] - alpha[0] * sharedap[k]; //NOT needed? //__syncthreads(); } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; temp = 0; for(int k = threadIdx.x; k < F; k += 64) temp += sharedr[k]*sharedr[k]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; for(int k = threadIdx.x; k < F; k += 64) sharedp[k] = sharedr[k] + beta[0] * sharedp[k]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(blockIdx.x==0 && threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx for(int k = threadIdx.x; k < F; k += 64) XT[blockIdx.x*F + k] = sharedx[k]; //*/ } } void alsUpdateFeature100Host(const int batch_offset, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* __restrict__ thetaT, float* XT, float* ythetaT, int cgIter){ alsUpdateFeature100<<<m, 64, SCAN_BATCH * F/2*sizeof(float2)>>> (batch_offset, csrRowIndex, csrColIndex, lambda, m, F, thetaT, XT, ythetaT, cgIter); cudaDeviceSynchronize(); cudaCheckError(); }
8c3ab8047889a726c3096051bfa686d2e1b828f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if COMPUTE_ENTROPY_VOLUME_CUDA #define NR_OF_THREADS_PER_MARGIN 1 static texture<float, 2, hipReadModeElementType> t2dActiveVoxelMarginalEntropy; __device__ void _UpdateHistogramEntry_device ( int iActiveVoxelId, int iVoxelX, int iVoxelY, int iVoxelZ, int iXOffset, int iYOffset, int iZOffset, int iValue, int iNrOfBins, int3 i3VolumeSize, hipPitchedPtr cActiveVoxelsHistorgram_pitched ) { int iX, iY, iZ; iX = iVoxelX + iXOffset; iY = iVoxelY + iYOffset; iZ = iVoxelZ + iZOffset; int3 i3TexCoord; i3TexCoord.x = IMirrorCoord(iX, i3VolumeSize.x); i3TexCoord.y = IMirrorCoord(iY, i3VolumeSize.y); i3TexCoord.z = IMirrorCoord(iZ, i3VolumeSize.z); int iSrcBin = tex2D(t2dSrcBinVolume, i3TexCoord.x, i3TexCoord.y + i3TexCoord.z * i3VolumeSize.y); // update the joint histogram if( iSrcBin < iNrOfBins ) atomicAdd( ADDRESS_2D( int, cActiveVoxelsHistorgram_pitched.ptr, sizeof(int), cActiveVoxelsHistorgram_pitched.pitch, iActiveVoxelId, iSrcBin), iValue ); } __global__ static void _UpdateHistogram_kernel ( int3 i3BlockCorner, int3 i3KernelSize, int3 i3VolumeSize, int iNrOfBins, hipPitchedPtr cActiveVoxelsHistorgram_pitched ) { int iActiveVoxelX = blockIdx.x * blockDim.x + threadIdx.x; int iActiveVoxelY = blockIdx.y * blockDim.y + threadIdx.y; int iActiveVoxelId = iActiveVoxelX + iActiveVoxelY * gridDim.x * blockDim.x; int iVoxelX = i3BlockCorner.x + iActiveVoxelX; int iVoxelY = i3BlockCorner.y + iActiveVoxelY; int iVoxelZ = i3BlockCorner.z; if( iVoxelX < i3VolumeSize.x && iVoxelY < i3VolumeSize.y && iVoxelZ < i3VolumeSize.z ) for(int iZDir = -1; iZDir <= +1; iZDir+=2) { int iZOffset = i3KernelSize.z * iZDir; switch (iZDir) { case -1: iZOffset--; break; } for(int iYOffset = -i3KernelSize.y; iYOffset <= i3KernelSize.y; iYOffset++) for(int iXOffset = -i3KernelSize.x; iXOffset <= i3KernelSize.x; iXOffset++) { _UpdateHistogramEntry_device ( iActiveVoxelId, iVoxelX, iVoxelY, iVoxelZ, iXOffset,iYOffset,iZOffset, iZDir, iNrOfBins, i3VolumeSize, cActiveVoxelsHistorgram_pitched ); } } } __global__ static void _CreateHistogram_kernel ( int3 i3BlockCorner, int3 i3KernelSize, int3 i3VolumeSize, int iNrOfBins, hipPitchedPtr cActiveVoxelsHistorgram_pitched ) { int iActiveVoxelX = blockIdx.x * blockDim.x + threadIdx.x; int iActiveVoxelY = blockIdx.y * blockDim.y + threadIdx.y; int iActiveVoxelId = iActiveVoxelX + iActiveVoxelY * gridDim.x * blockDim.x; int iVoxelX = i3BlockCorner.x + iActiveVoxelX; int iVoxelY = i3BlockCorner.y + iActiveVoxelY; int iVoxelZ = i3BlockCorner.z; if( iVoxelX < i3VolumeSize.x && iVoxelY < i3VolumeSize.y && iVoxelZ < i3VolumeSize.z ) for(int iZOffset = -i3KernelSize.z; iZOffset <= i3KernelSize.z; iZOffset++) for(int iYOffset = -i3KernelSize.y; iYOffset <= i3KernelSize.y; iYOffset++) for(int iXOffset = -i3KernelSize.x; iXOffset <= i3KernelSize.x; iXOffset++) { _UpdateHistogramEntry_device ( iActiveVoxelId, iVoxelX, iVoxelY, iVoxelZ, iXOffset,iYOffset,iZOffset, +1, iNrOfBins, i3VolumeSize, cActiveVoxelsHistorgram_pitched ); } } __global__ void _ComputeEntropy_kernel ( int3 i3BlockCorner, int iNrOfBins, int3 i3KernelSize, int3 i3VolumeSize, hipPitchedPtr cEntropyVolume_pitched ) { int iActiveVoxelX = blockIdx.x * blockDim.x + threadIdx.x; int iActiveVoxelY = blockIdx.y * blockDim.y + threadIdx.y; int iActiveVoxelId = iActiveVoxelX + iActiveVoxelY * gridDim.x * blockDim.x; int iVoxelX = i3BlockCorner.x + iActiveVoxelX; int iVoxelY = i3BlockCorner.y + iActiveVoxelY; int iVoxelZ = i3BlockCorner.z; int iNrOfNeighbors 
= (2 * i3KernelSize.x + 1) * (2 * i3KernelSize.y + 1) * (2 * i3KernelSize.z + 1); float fNrOfNeighbors = float(iNrOfNeighbors); float fEntropy = 0.0f; for(int b = 0; b < iNrOfBins; b++) { int iCount = tex2D(t2dActiveVoxelHistorgrams, iActiveVoxelId, b); if( iCount ) { /* float fProb = float(iCount) / fNrOfNeighbors; fEntropy += fProb * log2(fProb); */ float fCount = float(iCount); fEntropy += fCount * log2(fCount); } } // fEntropy *= -1.0; fEntropy = -fEntropy / fNrOfNeighbors + log2(fNrOfNeighbors); fEntropy = max(0.0, fEntropy); *ADDRESS_2D( float, cEntropyVolume_pitched.ptr, sizeof(float), cEntropyVolume_pitched.pitch, iVoxelX, iVoxelY + iVoxelZ * i3VolumeSize.y) = fEntropy; } __global__ void _ComputeEntropyHierarchically_kernel ( int3 i3BlockCorner, int iNrOfBins, int3 i3KernelSize, int3 i3VolumeSize, int iGridDimX, int iGridDimY, int iLevel, hipPitchedPtr cActiveVoxelsHistorgram_pitched, hipPitchedPtr cEntropyVolume_pitched ) { int iBlockIdxX = blockIdx.x % iGridDimX; int iBlockIdxY = blockIdx.x / iGridDimX; int iActiveVoxelX = iBlockIdxX * blockDim.x + threadIdx.x; int iActiveVoxelY = iBlockIdxY * blockDim.y + threadIdx.y; int iActiveVoxelId = iActiveVoxelX + iActiveVoxelY * iGridDimX * blockDim.x; int iToBeUpdatedBin = blockIdx.y * iLevel; int iCount = ( iToBeUpdatedBin < iNrOfBins )?tex2D(t2dActiveVoxelHistorgrams, iActiveVoxelId, iToBeUpdatedBin):0; int iNextCount = ( iToBeUpdatedBin + iLevel < iNrOfBins )?tex2D(t2dActiveVoxelHistorgrams, iActiveVoxelId, iToBeUpdatedBin + iLevel):0; int iSum = iCount + iNextCount; *ADDRESS_2D( int, cActiveVoxelsHistorgram_pitched.ptr, sizeof(int), cActiveVoxelsHistorgram_pitched.pitch, iActiveVoxelId, iToBeUpdatedBin) = iSum; } __global__ void _ComputeMarginalEntropy_kernel ( int3 i3BlockCorner, int iNrOfBins, int3 i3KernelSize, int3 i3VolumeSize, int iGridDimX, int iGridDimY, int iNrOfMarginalBins, int iNrOfThreadsPerMagin, hipPitchedPtr cActiveVoxelMarginalEntropy_pitched ) { int iBlockIdxX = blockIdx.x % iGridDimX; int iBlockIdxY = blockIdx.x / iGridDimX; int iActiveVoxelX = iBlockIdxX * blockDim.x + threadIdx.x; int iActiveVoxelY = iBlockIdxY * blockDim.y + threadIdx.y; int iActiveVoxelId = iActiveVoxelX + iActiveVoxelY * iGridDimX * blockDim.x; int iMargin = blockIdx.y / iNrOfThreadsPerMagin; int iThreadIdInMargin = blockIdx.y % iNrOfThreadsPerMagin; int iBase = iMargin * iNrOfMarginalBins + iThreadIdInMargin * iNrOfMarginalBins / iNrOfThreadsPerMagin; float fEntropy = 0.0f; for(int b = 0; b < iNrOfMarginalBins / iNrOfThreadsPerMagin; b++) { int iCount = tex2D(t2dActiveVoxelHistorgrams, iActiveVoxelId, iBase + b); if( iCount ) { float fCount = float(iCount); fEntropy += fCount * log2(fCount); } } *ADDRESS_2D( float, cActiveVoxelMarginalEntropy_pitched.ptr, sizeof(float), cActiveVoxelMarginalEntropy_pitched.pitch, iActiveVoxelId, blockIdx.y) = fEntropy; } __global__ void _SumMarginalEntropy_kernel ( int3 i3BlockCorner, int iNrOfBins, int3 i3KernelSize, int3 i3VolumeSize, int iNrOfMargins, int iNrOfThreadsPerMagin, hipPitchedPtr cEntropyVolume_pitched ) { int iActiveVoxelX = blockIdx.x * blockDim.x + threadIdx.x; int iActiveVoxelY = blockIdx.y * blockDim.y + threadIdx.y; int iActiveVoxelId = iActiveVoxelX + iActiveVoxelY * gridDim.x * blockDim.x; int iVoxelX = i3BlockCorner.x + iActiveVoxelX; int iVoxelY = i3BlockCorner.y + iActiveVoxelY; int iVoxelZ = i3BlockCorner.z; int iNrOfNeighbors = (2 * i3KernelSize.x + 1) * (2 * i3KernelSize.y + 1) * (2 * i3KernelSize.z + 1); float fNrOfNeighbors = float(iNrOfNeighbors); float fEntropy = 0.0f; for(int 
b = 0; b < iNrOfMargins * iNrOfThreadsPerMagin; b++) { float fMarginalEntropy = tex2D(t2dActiveVoxelMarginalEntropy, iActiveVoxelId, b); fEntropy += fMarginalEntropy; } fEntropy = -fEntropy / fNrOfNeighbors + log2(fNrOfNeighbors); fEntropy = max(0.0, fEntropy); *ADDRESS_2D( float, cEntropyVolume_pitched.ptr, sizeof(float), cEntropyVolume_pitched.pitch, iVoxelX, iVoxelY + iVoxelZ * i3VolumeSize.y) = fEntropy; } ////////////////////////////////////////////////////////////////////////////// void _ComputeEntropyVolume_cuda ( // res. of the neighboring region int3 i3KernelSize, // the histogram int iNrOfBins, int *piHistogram_global, float *pfLogHistogram_global, // res. of the volume int3 i3VolumeSize, // bin volume hipPitchedPtr cBinVolume_pitched, hipPitchedPtr cEntropyVolume_pitched ) { CLOCK_INIT(_ComputeEntropyVolume_PRINT_TIMING, __FUNCTION__ ": "); CLOCK_BEGIN(_ComputeEntropyVolume_PRINT_TIMING); dim3 v3Blk = dim3(BLOCK_DIM_X, BLOCK_DIM_Y); const unsigned int iMaxMemorySpace = MAX_MEMORY_SIZE; int iMaxNrOfBlocks = int(floorf(float(iMaxMemorySpace) / float(sizeof(int) * iNrOfBins * v3Blk.x * v3Blk.y))); int iNrOfXBlocks = int(ceilf(float(i3VolumeSize.x) / float(v3Blk.x))); int iNrOfYBlocks = int(ceilf(float(i3VolumeSize.y) / float(v3Blk.y))); iMaxNrOfBlocks = min(iMaxNrOfBlocks, iNrOfXBlocks * iNrOfYBlocks); iNrOfXBlocks = min(iNrOfXBlocks, iMaxNrOfBlocks); iNrOfYBlocks = int(ceilf(float(iMaxNrOfBlocks) / float(iNrOfXBlocks))); dim3 v3Grid = dim3(iNrOfXBlocks, iNrOfYBlocks); fprintf(stderr, "MEM = %d MB; #BLOCKS = %d x %d\n", iMaxMemorySpace/(1<<20), v3Grid.x, v3Grid.y); hipPitchedPtr cActiveVoxelHistorgrams_pitched; cActiveVoxelHistorgrams_pitched.xsize = v3Grid.x * v3Grid.y * v3Blk.x * v3Blk.y; cActiveVoxelHistorgrams_pitched.ysize = iNrOfBins; CUDA_SAFE_CALL_NO_SYNC( hipMallocPitch( (void**)&cActiveVoxelHistorgrams_pitched.ptr, &cActiveVoxelHistorgrams_pitched.pitch, cActiveVoxelHistorgrams_pitched.xsize * sizeof(int), cActiveVoxelHistorgrams_pitched.ysize) ); CUDA_SAFE_CALL_NO_SYNC( hipMemset2D( cActiveVoxelHistorgrams_pitched.ptr, cActiveVoxelHistorgrams_pitched.pitch, 0, cActiveVoxelHistorgrams_pitched.xsize * sizeof(int), cActiveVoxelHistorgrams_pitched.ysize) ); // bind the input vin volume to the texture that represents the src. 
bin volume t2dSrcBinVolume.addressMode[0] = hipAddressModeClamp; t2dSrcBinVolume.addressMode[1] = hipAddressModeClamp; t2dSrcBinVolume.filterMode = hipFilterModePoint; t2dSrcBinVolume.normalized = false; CUDA_SAFE_CALL_NO_SYNC( hipBindTexture2D( 0, t2dSrcBinVolume, cBinVolume_pitched.ptr, hipCreateChannelDesc<int>(), cVolumeExtent_array.width, cVolumeExtent_array.height * cVolumeExtent_array.depth, cBinVolume_pitched.pitch) ); // bind the histogram as a texture t2dActiveVoxelHistorgrams.addressMode[0] = hipAddressModeClamp; t2dActiveVoxelHistorgrams.addressMode[1] = hipAddressModeClamp; t2dActiveVoxelHistorgrams.filterMode = hipFilterModePoint; t2dActiveVoxelHistorgrams.normalized = false; CUDA_SAFE_CALL_NO_SYNC( hipBindTexture2D( 0, t2dActiveVoxelHistorgrams, cActiveVoxelHistorgrams_pitched.ptr, hipCreateChannelDesc<int>(), cActiveVoxelHistorgrams_pitched.xsize, cActiveVoxelHistorgrams_pitched.ysize, cActiveVoxelHistorgrams_pitched.pitch) ); int iNrOfMarginalBins = int(sqrtf(float(iNrOfBins))); int iNrOfMargins = int(ceilf(float(iNrOfBins)/float(iNrOfMarginalBins))); hipPitchedPtr cActiveVoxelMarginalEntropy_pitched; cActiveVoxelMarginalEntropy_pitched.xsize = v3Grid.x * v3Grid.y * v3Blk.x * v3Blk.y; cActiveVoxelMarginalEntropy_pitched.ysize = iNrOfMargins * NR_OF_THREADS_PER_MARGIN; CUDA_SAFE_CALL_NO_SYNC( hipMallocPitch( (void**)&cActiveVoxelMarginalEntropy_pitched.ptr, &cActiveVoxelMarginalEntropy_pitched.pitch, cActiveVoxelMarginalEntropy_pitched.xsize * sizeof(float), cActiveVoxelMarginalEntropy_pitched.ysize) ); CUDA_SAFE_CALL_NO_SYNC( hipMemset2D( cActiveVoxelMarginalEntropy_pitched.ptr, cActiveVoxelMarginalEntropy_pitched.pitch, 0, cActiveVoxelMarginalEntropy_pitched.pitch, cActiveVoxelMarginalEntropy_pitched.ysize) ); t2dActiveVoxelMarginalEntropy.addressMode[0] = hipAddressModeClamp; t2dActiveVoxelMarginalEntropy.addressMode[1] = hipAddressModeClamp; t2dActiveVoxelMarginalEntropy.filterMode = hipFilterModePoint; t2dActiveVoxelMarginalEntropy.normalized = false; CUDA_SAFE_CALL_NO_SYNC( hipBindTexture2D( 0, t2dActiveVoxelMarginalEntropy, cActiveVoxelMarginalEntropy_pitched.ptr, hipCreateChannelDesc<float>(), cActiveVoxelMarginalEntropy_pitched.xsize, cActiveVoxelMarginalEntropy_pitched.ysize, cActiveVoxelMarginalEntropy_pitched.pitch) ); CLOCK_END(_ComputeEntropyVolume_PRINT_TIMING, false); CLOCK_BEGIN(_ComputeEntropyVolume_PRINT_TIMING); for(int y = 0; y < i3VolumeSize.y; y += v3Grid.y * v3Blk.y) for(int x = 0; x < i3VolumeSize.x; x += v3Grid.x * v3Blk.x) for(int z = 0; z < i3VolumeSize.z; z++) { int3 i3BlockCorner = make_int3(x, y, z); CLOCK_INIT(_ComputeEntropyVolume_PRINT_LOOP_TIMING, __FUNCTION__ " (main loop): "); CLOCK_BEGIN(_ComputeEntropyVolume_PRINT_LOOP_TIMING); if( 0 == z ) { hipLaunchKernelGGL(( _CreateHistogram_kernel), dim3(v3Grid), dim3(v3Blk), 0, 0, i3BlockCorner, i3KernelSize, i3VolumeSize, iNrOfBins, cActiveVoxelHistorgrams_pitched ); CUT_CHECK_ERROR("_CreateHistogram_kernel() failed"); } else { hipLaunchKernelGGL(( _UpdateHistogram_kernel), dim3(v3Grid), dim3(v3Blk), 0, 0, i3BlockCorner, i3KernelSize, i3VolumeSize, iNrOfBins, cActiveVoxelHistorgrams_pitched ); CUT_CHECK_ERROR("_UpdateHistogram_kernel() failed"); } CLOCK_END(_ComputeEntropyVolume_PRINT_LOOP_TIMING, false); CLOCK_BEGIN(_ComputeEntropyVolume_PRINT_LOOP_TIMING); #if SCANNING_METHOD == SCANNING_METHOD_SCAN_WHOLE_HISTOGRAM hipLaunchKernelGGL(( _ComputeEntropy_kernel), dim3(v3Grid), dim3(v3Blk), 0, 0, i3BlockCorner, iNrOfBins, i3KernelSize, i3VolumeSize, cEntropyVolume_pitched ); 
CUT_CHECK_ERROR("_ComputeEntropy_kernel() failed"); #endif // #if SCANNING_METHOD == SCANNING_METHOD_SCAN_WHOLE_HISTOGRAM #if SCANNING_METHOD == SCANNING_METHOD_HIERARCHICAL_SCAN int iNrOfLevels = int(floorf(log2f(float(iNrOfBins)))); int iNrOfThreadsPerVoxel = iNrOfBins/2; for(int l = 1; l < iNrOfLevels; l++) { hipLaunchKernelGGL(( _ComputeEntropyHierarchically_kernel), dim3( dim3(v3Grid.x * v3Grid.y, iNrOfThreadsPerVoxel)), dim3( v3Blk), 0 , 0, i3BlockCorner, iNrOfBins, i3KernelSize, i3VolumeSize, v3Grid.x, v3Grid.y, l, cActiveVoxelHistorgrams_pitched, cEntropyVolume_pitched ); iNrOfThreadsPerVoxel /= 2; } CUT_CHECK_ERROR("_ComputeEntropyHierarchically_kernel() failed"); #endif // #if SCANNING_METHOD == SCANNING_METHOD_HIERARCHICAL_SCAN #if SCANNING_METHOD == SCANNING_METHOD_SCAN_ROWS_IN_PARALLEL hipLaunchKernelGGL(( _ComputeMarginalEntropy_kernel), dim3( dim3(v3Grid.x * v3Grid.y, NR_OF_THREADS_PER_MARGIN * iNrOfMargins)), dim3(v3Blk), 0 , 0, i3BlockCorner, iNrOfBins, i3KernelSize, i3VolumeSize, v3Grid.x, v3Grid.y, iNrOfMarginalBins, NR_OF_THREADS_PER_MARGIN, cActiveVoxelMarginalEntropy_pitched ); CUT_CHECK_ERROR("_ComputeMarginalEntropy_kernel() failed"); CLOCK_END(_ComputeEntropyVolume_PRINT_LOOP_TIMING, false); CLOCK_BEGIN(_ComputeEntropyVolume_PRINT_LOOP_TIMING); hipLaunchKernelGGL(( _SumMarginalEntropy_kernel), dim3(v3Grid), dim3(v3Blk), 0, 0, i3BlockCorner, iNrOfBins, i3KernelSize, i3VolumeSize, iNrOfMargins, NR_OF_THREADS_PER_MARGIN, cEntropyVolume_pitched ); CUT_CHECK_ERROR("_SumMarginalEntropy_kernel() failed"); #endif // #if SCANNING_METHOD == SCANNING_METHOD_SCAN_ROWS_IN_PARALLEL CLOCK_END(_ComputeEntropyVolume_PRINT_LOOP_TIMING, false); CLOCK_PRINT(_ComputeEntropyVolume_PRINT_LOOP_TIMING); } CLOCK_END(_ComputeEntropyVolume_PRINT_TIMING, false); CLOCK_BEGIN(_ComputeEntropyVolume_PRINT_TIMING); FREE_MEMORY(cActiveVoxelHistorgrams_pitched.ptr); FREE_MEMORY(cActiveVoxelMarginalEntropy_pitched.ptr); CLOCK_END(_ComputeEntropyVolume_PRINT_TIMING, false); CLOCK_PRINT(_ComputeEntropyVolume_PRINT_TIMING); } #endif // #if COMPUTE_ENTROPY_VOLUME_CUDA /* $Log: not supported by cvs2svn $ */
8c3ab8047889a726c3096051bfa686d2e1b828f3.cu
#if COMPUTE_ENTROPY_VOLUME_CUDA #define NR_OF_THREADS_PER_MARGIN 1 static texture<float, 2, cudaReadModeElementType> t2dActiveVoxelMarginalEntropy; __device__ void _UpdateHistogramEntry_device ( int iActiveVoxelId, int iVoxelX, int iVoxelY, int iVoxelZ, int iXOffset, int iYOffset, int iZOffset, int iValue, int iNrOfBins, int3 i3VolumeSize, cudaPitchedPtr cActiveVoxelsHistorgram_pitched ) { int iX, iY, iZ; iX = iVoxelX + iXOffset; iY = iVoxelY + iYOffset; iZ = iVoxelZ + iZOffset; int3 i3TexCoord; i3TexCoord.x = IMirrorCoord(iX, i3VolumeSize.x); i3TexCoord.y = IMirrorCoord(iY, i3VolumeSize.y); i3TexCoord.z = IMirrorCoord(iZ, i3VolumeSize.z); int iSrcBin = tex2D(t2dSrcBinVolume, i3TexCoord.x, i3TexCoord.y + i3TexCoord.z * i3VolumeSize.y); // update the joint histogram if( iSrcBin < iNrOfBins ) atomicAdd( ADDRESS_2D( int, cActiveVoxelsHistorgram_pitched.ptr, sizeof(int), cActiveVoxelsHistorgram_pitched.pitch, iActiveVoxelId, iSrcBin), iValue ); } __global__ static void _UpdateHistogram_kernel ( int3 i3BlockCorner, int3 i3KernelSize, int3 i3VolumeSize, int iNrOfBins, cudaPitchedPtr cActiveVoxelsHistorgram_pitched ) { int iActiveVoxelX = blockIdx.x * blockDim.x + threadIdx.x; int iActiveVoxelY = blockIdx.y * blockDim.y + threadIdx.y; int iActiveVoxelId = iActiveVoxelX + iActiveVoxelY * gridDim.x * blockDim.x; int iVoxelX = i3BlockCorner.x + iActiveVoxelX; int iVoxelY = i3BlockCorner.y + iActiveVoxelY; int iVoxelZ = i3BlockCorner.z; if( iVoxelX < i3VolumeSize.x && iVoxelY < i3VolumeSize.y && iVoxelZ < i3VolumeSize.z ) for(int iZDir = -1; iZDir <= +1; iZDir+=2) { int iZOffset = i3KernelSize.z * iZDir; switch (iZDir) { case -1: iZOffset--; break; } for(int iYOffset = -i3KernelSize.y; iYOffset <= i3KernelSize.y; iYOffset++) for(int iXOffset = -i3KernelSize.x; iXOffset <= i3KernelSize.x; iXOffset++) { _UpdateHistogramEntry_device ( iActiveVoxelId, iVoxelX, iVoxelY, iVoxelZ, iXOffset,iYOffset,iZOffset, iZDir, iNrOfBins, i3VolumeSize, cActiveVoxelsHistorgram_pitched ); } } } __global__ static void _CreateHistogram_kernel ( int3 i3BlockCorner, int3 i3KernelSize, int3 i3VolumeSize, int iNrOfBins, cudaPitchedPtr cActiveVoxelsHistorgram_pitched ) { int iActiveVoxelX = blockIdx.x * blockDim.x + threadIdx.x; int iActiveVoxelY = blockIdx.y * blockDim.y + threadIdx.y; int iActiveVoxelId = iActiveVoxelX + iActiveVoxelY * gridDim.x * blockDim.x; int iVoxelX = i3BlockCorner.x + iActiveVoxelX; int iVoxelY = i3BlockCorner.y + iActiveVoxelY; int iVoxelZ = i3BlockCorner.z; if( iVoxelX < i3VolumeSize.x && iVoxelY < i3VolumeSize.y && iVoxelZ < i3VolumeSize.z ) for(int iZOffset = -i3KernelSize.z; iZOffset <= i3KernelSize.z; iZOffset++) for(int iYOffset = -i3KernelSize.y; iYOffset <= i3KernelSize.y; iYOffset++) for(int iXOffset = -i3KernelSize.x; iXOffset <= i3KernelSize.x; iXOffset++) { _UpdateHistogramEntry_device ( iActiveVoxelId, iVoxelX, iVoxelY, iVoxelZ, iXOffset,iYOffset,iZOffset, +1, iNrOfBins, i3VolumeSize, cActiveVoxelsHistorgram_pitched ); } } __global__ void _ComputeEntropy_kernel ( int3 i3BlockCorner, int iNrOfBins, int3 i3KernelSize, int3 i3VolumeSize, cudaPitchedPtr cEntropyVolume_pitched ) { int iActiveVoxelX = blockIdx.x * blockDim.x + threadIdx.x; int iActiveVoxelY = blockIdx.y * blockDim.y + threadIdx.y; int iActiveVoxelId = iActiveVoxelX + iActiveVoxelY * gridDim.x * blockDim.x; int iVoxelX = i3BlockCorner.x + iActiveVoxelX; int iVoxelY = i3BlockCorner.y + iActiveVoxelY; int iVoxelZ = i3BlockCorner.z; int iNrOfNeighbors = (2 * i3KernelSize.x + 1) * (2 * i3KernelSize.y + 1) * (2 * i3KernelSize.z + 1); 
float fNrOfNeighbors = float(iNrOfNeighbors); float fEntropy = 0.0f; for(int b = 0; b < iNrOfBins; b++) { int iCount = tex2D(t2dActiveVoxelHistorgrams, iActiveVoxelId, b); if( iCount ) { /* float fProb = float(iCount) / fNrOfNeighbors; fEntropy += fProb * log2(fProb); */ float fCount = float(iCount); fEntropy += fCount * log2(fCount); } } // fEntropy *= -1.0; fEntropy = -fEntropy / fNrOfNeighbors + log2(fNrOfNeighbors); fEntropy = max(0.0, fEntropy); *ADDRESS_2D( float, cEntropyVolume_pitched.ptr, sizeof(float), cEntropyVolume_pitched.pitch, iVoxelX, iVoxelY + iVoxelZ * i3VolumeSize.y) = fEntropy; } __global__ void _ComputeEntropyHierarchically_kernel ( int3 i3BlockCorner, int iNrOfBins, int3 i3KernelSize, int3 i3VolumeSize, int iGridDimX, int iGridDimY, int iLevel, cudaPitchedPtr cActiveVoxelsHistorgram_pitched, cudaPitchedPtr cEntropyVolume_pitched ) { int iBlockIdxX = blockIdx.x % iGridDimX; int iBlockIdxY = blockIdx.x / iGridDimX; int iActiveVoxelX = iBlockIdxX * blockDim.x + threadIdx.x; int iActiveVoxelY = iBlockIdxY * blockDim.y + threadIdx.y; int iActiveVoxelId = iActiveVoxelX + iActiveVoxelY * iGridDimX * blockDim.x; int iToBeUpdatedBin = blockIdx.y * iLevel; int iCount = ( iToBeUpdatedBin < iNrOfBins )?tex2D(t2dActiveVoxelHistorgrams, iActiveVoxelId, iToBeUpdatedBin):0; int iNextCount = ( iToBeUpdatedBin + iLevel < iNrOfBins )?tex2D(t2dActiveVoxelHistorgrams, iActiveVoxelId, iToBeUpdatedBin + iLevel):0; int iSum = iCount + iNextCount; *ADDRESS_2D( int, cActiveVoxelsHistorgram_pitched.ptr, sizeof(int), cActiveVoxelsHistorgram_pitched.pitch, iActiveVoxelId, iToBeUpdatedBin) = iSum; } __global__ void _ComputeMarginalEntropy_kernel ( int3 i3BlockCorner, int iNrOfBins, int3 i3KernelSize, int3 i3VolumeSize, int iGridDimX, int iGridDimY, int iNrOfMarginalBins, int iNrOfThreadsPerMagin, cudaPitchedPtr cActiveVoxelMarginalEntropy_pitched ) { int iBlockIdxX = blockIdx.x % iGridDimX; int iBlockIdxY = blockIdx.x / iGridDimX; int iActiveVoxelX = iBlockIdxX * blockDim.x + threadIdx.x; int iActiveVoxelY = iBlockIdxY * blockDim.y + threadIdx.y; int iActiveVoxelId = iActiveVoxelX + iActiveVoxelY * iGridDimX * blockDim.x; int iMargin = blockIdx.y / iNrOfThreadsPerMagin; int iThreadIdInMargin = blockIdx.y % iNrOfThreadsPerMagin; int iBase = iMargin * iNrOfMarginalBins + iThreadIdInMargin * iNrOfMarginalBins / iNrOfThreadsPerMagin; float fEntropy = 0.0f; for(int b = 0; b < iNrOfMarginalBins / iNrOfThreadsPerMagin; b++) { int iCount = tex2D(t2dActiveVoxelHistorgrams, iActiveVoxelId, iBase + b); if( iCount ) { float fCount = float(iCount); fEntropy += fCount * log2(fCount); } } *ADDRESS_2D( float, cActiveVoxelMarginalEntropy_pitched.ptr, sizeof(float), cActiveVoxelMarginalEntropy_pitched.pitch, iActiveVoxelId, blockIdx.y) = fEntropy; } __global__ void _SumMarginalEntropy_kernel ( int3 i3BlockCorner, int iNrOfBins, int3 i3KernelSize, int3 i3VolumeSize, int iNrOfMargins, int iNrOfThreadsPerMagin, cudaPitchedPtr cEntropyVolume_pitched ) { int iActiveVoxelX = blockIdx.x * blockDim.x + threadIdx.x; int iActiveVoxelY = blockIdx.y * blockDim.y + threadIdx.y; int iActiveVoxelId = iActiveVoxelX + iActiveVoxelY * gridDim.x * blockDim.x; int iVoxelX = i3BlockCorner.x + iActiveVoxelX; int iVoxelY = i3BlockCorner.y + iActiveVoxelY; int iVoxelZ = i3BlockCorner.z; int iNrOfNeighbors = (2 * i3KernelSize.x + 1) * (2 * i3KernelSize.y + 1) * (2 * i3KernelSize.z + 1); float fNrOfNeighbors = float(iNrOfNeighbors); float fEntropy = 0.0f; for(int b = 0; b < iNrOfMargins * iNrOfThreadsPerMagin; b++) { float fMarginalEntropy 
= tex2D(t2dActiveVoxelMarginalEntropy, iActiveVoxelId, b); fEntropy += fMarginalEntropy; } fEntropy = -fEntropy / fNrOfNeighbors + log2(fNrOfNeighbors); fEntropy = max(0.0, fEntropy); *ADDRESS_2D( float, cEntropyVolume_pitched.ptr, sizeof(float), cEntropyVolume_pitched.pitch, iVoxelX, iVoxelY + iVoxelZ * i3VolumeSize.y) = fEntropy; } ////////////////////////////////////////////////////////////////////////////// void _ComputeEntropyVolume_cuda ( // res. of the neighboring region int3 i3KernelSize, // the histogram int iNrOfBins, int *piHistogram_global, float *pfLogHistogram_global, // res. of the volume int3 i3VolumeSize, // bin volume cudaPitchedPtr cBinVolume_pitched, cudaPitchedPtr cEntropyVolume_pitched ) { CLOCK_INIT(_ComputeEntropyVolume_PRINT_TIMING, __FUNCTION__ ": "); CLOCK_BEGIN(_ComputeEntropyVolume_PRINT_TIMING); dim3 v3Blk = dim3(BLOCK_DIM_X, BLOCK_DIM_Y); const unsigned int iMaxMemorySpace = MAX_MEMORY_SIZE; int iMaxNrOfBlocks = int(floorf(float(iMaxMemorySpace) / float(sizeof(int) * iNrOfBins * v3Blk.x * v3Blk.y))); int iNrOfXBlocks = int(ceilf(float(i3VolumeSize.x) / float(v3Blk.x))); int iNrOfYBlocks = int(ceilf(float(i3VolumeSize.y) / float(v3Blk.y))); iMaxNrOfBlocks = min(iMaxNrOfBlocks, iNrOfXBlocks * iNrOfYBlocks); iNrOfXBlocks = min(iNrOfXBlocks, iMaxNrOfBlocks); iNrOfYBlocks = int(ceilf(float(iMaxNrOfBlocks) / float(iNrOfXBlocks))); dim3 v3Grid = dim3(iNrOfXBlocks, iNrOfYBlocks); fprintf(stderr, "MEM = %d MB; #BLOCKS = %d x %d\n", iMaxMemorySpace/(1<<20), v3Grid.x, v3Grid.y); cudaPitchedPtr cActiveVoxelHistorgrams_pitched; cActiveVoxelHistorgrams_pitched.xsize = v3Grid.x * v3Grid.y * v3Blk.x * v3Blk.y; cActiveVoxelHistorgrams_pitched.ysize = iNrOfBins; CUDA_SAFE_CALL_NO_SYNC( cudaMallocPitch( (void**)&cActiveVoxelHistorgrams_pitched.ptr, &cActiveVoxelHistorgrams_pitched.pitch, cActiveVoxelHistorgrams_pitched.xsize * sizeof(int), cActiveVoxelHistorgrams_pitched.ysize) ); CUDA_SAFE_CALL_NO_SYNC( cudaMemset2D( cActiveVoxelHistorgrams_pitched.ptr, cActiveVoxelHistorgrams_pitched.pitch, 0, cActiveVoxelHistorgrams_pitched.xsize * sizeof(int), cActiveVoxelHistorgrams_pitched.ysize) ); // bind the input vin volume to the texture that represents the src. 
bin volume t2dSrcBinVolume.addressMode[0] = cudaAddressModeClamp; t2dSrcBinVolume.addressMode[1] = cudaAddressModeClamp; t2dSrcBinVolume.filterMode = cudaFilterModePoint; t2dSrcBinVolume.normalized = false; CUDA_SAFE_CALL_NO_SYNC( cudaBindTexture2D( 0, t2dSrcBinVolume, cBinVolume_pitched.ptr, cudaCreateChannelDesc<int>(), cVolumeExtent_array.width, cVolumeExtent_array.height * cVolumeExtent_array.depth, cBinVolume_pitched.pitch) ); // bind the histogram as a texture t2dActiveVoxelHistorgrams.addressMode[0] = cudaAddressModeClamp; t2dActiveVoxelHistorgrams.addressMode[1] = cudaAddressModeClamp; t2dActiveVoxelHistorgrams.filterMode = cudaFilterModePoint; t2dActiveVoxelHistorgrams.normalized = false; CUDA_SAFE_CALL_NO_SYNC( cudaBindTexture2D( 0, t2dActiveVoxelHistorgrams, cActiveVoxelHistorgrams_pitched.ptr, cudaCreateChannelDesc<int>(), cActiveVoxelHistorgrams_pitched.xsize, cActiveVoxelHistorgrams_pitched.ysize, cActiveVoxelHistorgrams_pitched.pitch) ); int iNrOfMarginalBins = int(sqrtf(float(iNrOfBins))); int iNrOfMargins = int(ceilf(float(iNrOfBins)/float(iNrOfMarginalBins))); cudaPitchedPtr cActiveVoxelMarginalEntropy_pitched; cActiveVoxelMarginalEntropy_pitched.xsize = v3Grid.x * v3Grid.y * v3Blk.x * v3Blk.y; cActiveVoxelMarginalEntropy_pitched.ysize = iNrOfMargins * NR_OF_THREADS_PER_MARGIN; CUDA_SAFE_CALL_NO_SYNC( cudaMallocPitch( (void**)&cActiveVoxelMarginalEntropy_pitched.ptr, &cActiveVoxelMarginalEntropy_pitched.pitch, cActiveVoxelMarginalEntropy_pitched.xsize * sizeof(float), cActiveVoxelMarginalEntropy_pitched.ysize) ); CUDA_SAFE_CALL_NO_SYNC( cudaMemset2D( cActiveVoxelMarginalEntropy_pitched.ptr, cActiveVoxelMarginalEntropy_pitched.pitch, 0, cActiveVoxelMarginalEntropy_pitched.pitch, cActiveVoxelMarginalEntropy_pitched.ysize) ); t2dActiveVoxelMarginalEntropy.addressMode[0] = cudaAddressModeClamp; t2dActiveVoxelMarginalEntropy.addressMode[1] = cudaAddressModeClamp; t2dActiveVoxelMarginalEntropy.filterMode = cudaFilterModePoint; t2dActiveVoxelMarginalEntropy.normalized = false; CUDA_SAFE_CALL_NO_SYNC( cudaBindTexture2D( 0, t2dActiveVoxelMarginalEntropy, cActiveVoxelMarginalEntropy_pitched.ptr, cudaCreateChannelDesc<float>(), cActiveVoxelMarginalEntropy_pitched.xsize, cActiveVoxelMarginalEntropy_pitched.ysize, cActiveVoxelMarginalEntropy_pitched.pitch) ); CLOCK_END(_ComputeEntropyVolume_PRINT_TIMING, false); CLOCK_BEGIN(_ComputeEntropyVolume_PRINT_TIMING); for(int y = 0; y < i3VolumeSize.y; y += v3Grid.y * v3Blk.y) for(int x = 0; x < i3VolumeSize.x; x += v3Grid.x * v3Blk.x) for(int z = 0; z < i3VolumeSize.z; z++) { int3 i3BlockCorner = make_int3(x, y, z); CLOCK_INIT(_ComputeEntropyVolume_PRINT_LOOP_TIMING, __FUNCTION__ " (main loop): "); CLOCK_BEGIN(_ComputeEntropyVolume_PRINT_LOOP_TIMING); if( 0 == z ) { _CreateHistogram_kernel<<<v3Grid, v3Blk, 0>>> ( i3BlockCorner, i3KernelSize, i3VolumeSize, iNrOfBins, cActiveVoxelHistorgrams_pitched ); CUT_CHECK_ERROR("_CreateHistogram_kernel() failed"); } else { _UpdateHistogram_kernel<<<v3Grid, v3Blk, 0>>> ( i3BlockCorner, i3KernelSize, i3VolumeSize, iNrOfBins, cActiveVoxelHistorgrams_pitched ); CUT_CHECK_ERROR("_UpdateHistogram_kernel() failed"); } CLOCK_END(_ComputeEntropyVolume_PRINT_LOOP_TIMING, false); CLOCK_BEGIN(_ComputeEntropyVolume_PRINT_LOOP_TIMING); #if SCANNING_METHOD == SCANNING_METHOD_SCAN_WHOLE_HISTOGRAM _ComputeEntropy_kernel<<<v3Grid, v3Blk, 0>>> ( i3BlockCorner, iNrOfBins, i3KernelSize, i3VolumeSize, cEntropyVolume_pitched ); CUT_CHECK_ERROR("_ComputeEntropy_kernel() failed"); #endif // #if SCANNING_METHOD == 
SCANNING_METHOD_SCAN_WHOLE_HISTOGRAM #if SCANNING_METHOD == SCANNING_METHOD_HIERARCHICAL_SCAN int iNrOfLevels = int(floorf(log2f(float(iNrOfBins)))); int iNrOfThreadsPerVoxel = iNrOfBins/2; for(int l = 1; l < iNrOfLevels; l++) { _ComputeEntropyHierarchically_kernel<<< dim3(v3Grid.x * v3Grid.y, iNrOfThreadsPerVoxel), v3Blk, 0 >>> ( i3BlockCorner, iNrOfBins, i3KernelSize, i3VolumeSize, v3Grid.x, v3Grid.y, l, cActiveVoxelHistorgrams_pitched, cEntropyVolume_pitched ); iNrOfThreadsPerVoxel /= 2; } CUT_CHECK_ERROR("_ComputeEntropyHierarchically_kernel() failed"); #endif // #if SCANNING_METHOD == SCANNING_METHOD_HIERARCHICAL_SCAN #if SCANNING_METHOD == SCANNING_METHOD_SCAN_ROWS_IN_PARALLEL _ComputeMarginalEntropy_kernel<<< dim3(v3Grid.x * v3Grid.y, NR_OF_THREADS_PER_MARGIN * iNrOfMargins), v3Blk, 0 >>> ( i3BlockCorner, iNrOfBins, i3KernelSize, i3VolumeSize, v3Grid.x, v3Grid.y, iNrOfMarginalBins, NR_OF_THREADS_PER_MARGIN, cActiveVoxelMarginalEntropy_pitched ); CUT_CHECK_ERROR("_ComputeMarginalEntropy_kernel() failed"); CLOCK_END(_ComputeEntropyVolume_PRINT_LOOP_TIMING, false); CLOCK_BEGIN(_ComputeEntropyVolume_PRINT_LOOP_TIMING); _SumMarginalEntropy_kernel<<<v3Grid, v3Blk, 0>>> ( i3BlockCorner, iNrOfBins, i3KernelSize, i3VolumeSize, iNrOfMargins, NR_OF_THREADS_PER_MARGIN, cEntropyVolume_pitched ); CUT_CHECK_ERROR("_SumMarginalEntropy_kernel() failed"); #endif // #if SCANNING_METHOD == SCANNING_METHOD_SCAN_ROWS_IN_PARALLEL CLOCK_END(_ComputeEntropyVolume_PRINT_LOOP_TIMING, false); CLOCK_PRINT(_ComputeEntropyVolume_PRINT_LOOP_TIMING); } CLOCK_END(_ComputeEntropyVolume_PRINT_TIMING, false); CLOCK_BEGIN(_ComputeEntropyVolume_PRINT_TIMING); FREE_MEMORY(cActiveVoxelHistorgrams_pitched.ptr); FREE_MEMORY(cActiveVoxelMarginalEntropy_pitched.ptr); CLOCK_END(_ComputeEntropyVolume_PRINT_TIMING, false); CLOCK_PRINT(_ComputeEntropyVolume_PRINT_TIMING); } #endif // #if COMPUTE_ENTROPY_VOLUME_CUDA /* $Log: not supported by cvs2svn $ */
2952ca7e2747c1da74e04bb4f2ab98d9a5676b1e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "threadFenceReduction_kernel.cuh" #include "posFind.cuh" struct posFinder { TASKCFG place init() {return place(0u, 0.0);} TASKCFG place reduce(place x, place y){if (y.val > x.val) return y; else return x;} TASKCFG place f(unsigned int i, double c){return place(i, fabs(c));} }; __global__ void findPeak_512_e2(const double *g_idata, place *g_odata, unsigned int n) { reduceSinglePass_devGen<512, true, place, double, posFinder>(g_idata, g_odata, n); }
2952ca7e2747c1da74e04bb4f2ab98d9a5676b1e.cu
#include "threadFenceReduction_kernel.cuh" #include "posFind.cuh" struct posFinder { TASKCFG place init() {return place(0u, 0.0);} TASKCFG place reduce(place x, place y){if (y.val > x.val) return y; else return x;} TASKCFG place f(unsigned int i, double c){return place(i, fabs(c));} }; __global__ void findPeak_512_e2(const double *g_idata, place *g_odata, unsigned int n) { reduceSinglePass_devGen<512, true, place, double, posFinder>(g_idata, g_odata, n); }
4da6c61845d6823a53e2f97b9dfb4b5ab26cb2f0.hip
// !!! This is a file automatically generated by hipify!!! #include "generators.cuh" #include "../buffer_manager.cuh" #include <iostream> #include <chrono> // #include <thread> // #include <chrono> using namespace std; __host__ generator::generator(h_operator_t * parent, int32_t *src, size_t N): parent(parent), src(src), N(N){ // parent->open(); // hipStream_t strm; gpu(hipStreamCreateWithFlags(&strm, hipStreamNonBlocking)); // buffer_pool<int32_t>::buffer_t ** buff_ret; gpu(hipHostMalloc(&buff_ret, sizeof(buffer_pool<int32_t>::buffer_t *))); // buffer_pool<int32_t>::buffer_t::inspector_t insp(strm); // while (N > 0){ // buffer_pool<int32_t>::buffer_t * buff = buffer_manager<int32_t>::get_buffer(); // int m = min(N, buff_size); // insp.load(buff, true); // insp.overwrite(src, m); // insp.save(buff, true); // parent->consume(buff); // // variant::apply_visitor(push(buff), *parent); // // push()(*parent); //->consume(buff); // N -= m; // src += m; // } // gpu(hipStreamSynchronize(strm)); // gpu(hipStreamDestroy(strm)); // gpu(hipHostFree(buff_ret)); // parent->close(); } __host__ void generator::open(){ parent->open(); } __host__ void generator::consume(buffer_pool<int32_t>::buffer_t * data){ assert(false); } __host__ void generator::close(){ { buffer_pool<int32_t>::buffer_t::inspector_t insp(strm); auto start = chrono::system_clock::now(); while (N > 0){ buffer_pool<int32_t>::buffer_t * buff = buffer_manager<int32_t>::get_buffer(); // buffer_pool<int32_t>::buffer_t * buff = buffer_manager<int32_t>::h_get_buffer(1); size_t m = min(N, (size_t) buffer_pool<int32_t>::buffer_t::capacity()); // insp.load(buff, true); // insp.overwrite(src, m, false); // insp.save(buff, true); // memcpy(buff->data, src, m * sizeof(int32_t)); buff->data = src; buff->cnt = m; assert(m > 0); parent->consume(buff); N -= m; src += m; } auto end = chrono::system_clock::now(); cout << chrono::duration_cast<chrono::milliseconds>(end - start).count() << "ms" << endl; } { auto start = chrono::system_clock::now(); gpu(hipStreamSynchronize(strm)); auto end = chrono::system_clock::now(); cout << chrono::duration_cast<chrono::milliseconds>(end - start).count() << "ms." << endl; } parent->close(); } __host__ generator::~generator(){ gpu(hipStreamDestroy(strm)); gpu(hipHostFree(buff_ret)); } // template<> // __host__ __device__ void push::operator()<generator *>(generator * op) const{ // op->consume(NULL); // }
4da6c61845d6823a53e2f97b9dfb4b5ab26cb2f0.cu
#include "generators.cuh" #include "../buffer_manager.cuh" #include <iostream> #include <chrono> // #include <thread> // #include <chrono> using namespace std; __host__ generator::generator(h_operator_t * parent, int32_t *src, size_t N): parent(parent), src(src), N(N){ // parent->open(); // cudaStream_t strm; gpu(cudaStreamCreateWithFlags(&strm, cudaStreamNonBlocking)); // buffer_pool<int32_t>::buffer_t ** buff_ret; gpu(cudaMallocHost(&buff_ret, sizeof(buffer_pool<int32_t>::buffer_t *))); // buffer_pool<int32_t>::buffer_t::inspector_t insp(strm); // while (N > 0){ // buffer_pool<int32_t>::buffer_t * buff = buffer_manager<int32_t>::get_buffer(); // int m = min(N, buff_size); // insp.load(buff, true); // insp.overwrite(src, m); // insp.save(buff, true); // parent->consume(buff); // // variant::apply_visitor(push(buff), *parent); // // push()(*parent); //->consume(buff); // N -= m; // src += m; // } // gpu(cudaStreamSynchronize(strm)); // gpu(cudaStreamDestroy(strm)); // gpu(cudaFreeHost(buff_ret)); // parent->close(); } __host__ void generator::open(){ parent->open(); } __host__ void generator::consume(buffer_pool<int32_t>::buffer_t * data){ assert(false); } __host__ void generator::close(){ { buffer_pool<int32_t>::buffer_t::inspector_t insp(strm); auto start = chrono::system_clock::now(); while (N > 0){ buffer_pool<int32_t>::buffer_t * buff = buffer_manager<int32_t>::get_buffer(); // buffer_pool<int32_t>::buffer_t * buff = buffer_manager<int32_t>::h_get_buffer(1); size_t m = min(N, (size_t) buffer_pool<int32_t>::buffer_t::capacity()); // insp.load(buff, true); // insp.overwrite(src, m, false); // insp.save(buff, true); // memcpy(buff->data, src, m * sizeof(int32_t)); buff->data = src; buff->cnt = m; assert(m > 0); parent->consume(buff); N -= m; src += m; } auto end = chrono::system_clock::now(); cout << chrono::duration_cast<chrono::milliseconds>(end - start).count() << "ms" << endl; } { auto start = chrono::system_clock::now(); gpu(cudaStreamSynchronize(strm)); auto end = chrono::system_clock::now(); cout << chrono::duration_cast<chrono::milliseconds>(end - start).count() << "ms." << endl; } parent->close(); } __host__ generator::~generator(){ gpu(cudaStreamDestroy(strm)); gpu(cudaFreeHost(buff_ret)); } // template<> // __host__ __device__ void push::operator()<generator *>(generator * op) const{ // op->consume(NULL); // }
7bdc56c6b0a32054ade28089302fe6bc79ae6d07.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef KD_TRAVERSE_CU #define KD_TRAVERSE_CU // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <helper_cuda.h> // includes, kernels #include "common_hip.cuh" //USE_TEX #include "KdTraverse_kernel.cu" //////////////////////////////////////////////////////////////////////////////// //! Entry point for Cuda functionality on host side //! @param argc command line argument count //! @param argv command line arguments //! @param data data to process on the device //! @param len len of \a data //////////////////////////////////////////////////////////////////////////////// extern "C" { void traverseTreeCuda ( float4* dPoints, int numPoints, int* dCells, int* dBlocks, KdTreeGPU kdTree, MeshGPU mesh, hipStream_t stream ) { if( numPoints < 1 ) return; // Bind textures checkCudaErrors(hipBindTexture(0, STex, kdTree.dS, kdTree.sS)); checkCudaErrors(hipBindTexture(0, LTex, kdTree.dL, kdTree.sL)); //cutilSafeCall(hipBindTexture(0, ITex, kdTree.dI, kdTree.sI)); //cutilSafeCall(hipBindTexture(0, nodesTex, mesh.nodes, mesh.s_nodes)); // setup execution parameters int threadsPerBlock = 256; // Evaluate 128 positions per block int numBlocks = ( numPoints + threadsPerBlock-1 ) / threadsPerBlock; CREATE_TIMER START_TIMER(stream) //// execute the kernel hipLaunchKernelGGL(( traverseTreeSPKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, stream , (float4*) dPoints, numPoints, dCells, dBlocks, kdTree ); getLastCudaError("Kernel execution failed"); STOP_TIMER(stream) PRINT_TIMER("Tree-Traverse: ", "\n"); STORE_TIMER(kdTreeTimer) DESTROY_TIMER checkCudaErrors(hipUnbindTexture(STex)); checkCudaErrors(hipUnbindTexture(LTex)); //cutilSafeCall(hipUnbindTexture(ITex)); //cutilSafeCall(hipUnbindTexture(nodesTex)); }; } #endif
7bdc56c6b0a32054ade28089302fe6bc79ae6d07.cu
#ifndef KD_TRAVERSE_CU #define KD_TRAVERSE_CU // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <helper_cuda.h> // includes, kernels #include "common.cuh" //USE_TEX #include "KdTraverse_kernel.cu" //////////////////////////////////////////////////////////////////////////////// //! Entry point for Cuda functionality on host side //! @param argc command line argument count //! @param argv command line arguments //! @param data data to process on the device //! @param len len of \a data //////////////////////////////////////////////////////////////////////////////// extern "C" { void traverseTreeCuda ( float4* dPoints, int numPoints, int* dCells, int* dBlocks, KdTreeGPU kdTree, MeshGPU mesh, cudaStream_t stream ) { if( numPoints < 1 ) return; // Bind textures checkCudaErrors(cudaBindTexture(0, STex, kdTree.dS, kdTree.sS)); checkCudaErrors(cudaBindTexture(0, LTex, kdTree.dL, kdTree.sL)); //cutilSafeCall(cudaBindTexture(0, ITex, kdTree.dI, kdTree.sI)); //cutilSafeCall(cudaBindTexture(0, nodesTex, mesh.nodes, mesh.s_nodes)); // setup execution parameters int threadsPerBlock = 256; // Evaluate 256 positions per block int numBlocks = ( numPoints + threadsPerBlock-1 ) / threadsPerBlock; CREATE_TIMER START_TIMER(stream) //// execute the kernel traverseTreeSPKernel<<< numBlocks, threadsPerBlock, 0, stream >>> ((float4*) dPoints, numPoints, dCells, dBlocks, kdTree ); getLastCudaError("Kernel execution failed"); STOP_TIMER(stream) PRINT_TIMER("Tree-Traverse: ", "\n"); STORE_TIMER(kdTreeTimer) DESTROY_TIMER checkCudaErrors(cudaUnbindTexture(STex)); checkCudaErrors(cudaUnbindTexture(LTex)); //cutilSafeCall(cudaUnbindTexture(ITex)); //cutilSafeCall(cudaUnbindTexture(nodesTex)); }; } #endif
cf7863dcc58ea6866ad049d868f96ce6f33af232.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "kernels_hip.cuh" namespace emida { template<typename T> __global__ void prepare_pics(T* __restrict pic, const T* hanning_x, const T* hanning_y, const T* sums, size2_t size, size_t batch_size) { size_t whole_x = blockIdx.x * blockDim.x + threadIdx.x; size_t pic_x = whole_x % size.x; size_t pic_num = whole_x / size.x; size_t y = blockIdx.y * blockDim.y + threadIdx.y; //Problem? many threads may end up nothing if number of rows and block_size.y are in poor combination if (pic_num >= batch_size || y >= size.y) return; T& pixel = pic[pic_num * size.area() + y * size.x + pic_x]; //subtract mean of the picture pixel -= sums[pic_num] / size.area(); //apply hanning filter pixel *= hanning_x[pic_x] * hanning_y[y]; } template<typename T> void run_prepare_pics(T* pic, const T* hanning_x, const T* hanning_y, const T * sums, size2_t size, size_t batch_size) { dim3 block_size(16, 16); dim3 grid_size(div_up(size.x * batch_size, block_size.x), div_up(size.y, block_size.y)); hipLaunchKernelGGL(( prepare_pics<T>) , dim3(grid_size), dim3(block_size), 0, 0, pic, hanning_x, hanning_y, sums, size, batch_size); } template void run_prepare_pics<double>(double * pic, const double * hanning_x, const double* hanning_y, const double * sums, size2_t size, size_t batch_size); template<typename IN, typename OUT> __global__ void prepare_pics( const IN* __restrict__ pic, OUT* __restrict__ slices, const OUT* hanning_x, const OUT* hanning_y, const OUT* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size2_t out_size, size_t batch_size) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; size_t slice_tid = tid % out_size.area(); size_t slice_num = tid / out_size.area(); size2_t slice_pos = { slice_tid % out_size.x, slice_tid / out_size.x }; if (slice_num >= batch_size) return; size2_t pic_pos = begins[slice_num] + slice_pos; if (slice_pos.x >= slice_size.x || slice_pos.y >= slice_size.y) { slices[tid] = 0; return; } OUT pixel = pic[pic_pos.pos(src_size.x)]; //subtract mean of the picture pixel -= sums[slice_num] / slice_size.area(); //apply hanning filter and convert to OUT (float or double) pixel = (OUT)pixel * hanning_x[slice_pos.x] * hanning_y[slice_pos.y]; slices[tid] = pixel; } template<typename IN, typename OUT> void run_prepare_pics( const IN* pic, OUT* slices, const OUT* hanning_x, const OUT* hanning_y, const OUT* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size2_t out_size, size_t batch_size) { size_t block_size = 1024; size_t grid_size(div_up(out_size.area() * batch_size, block_size)); hipLaunchKernelGGL(( prepare_pics), dim3(grid_size), dim3(block_size) , 0, 0, pic, slices, hanning_x, hanning_y, sums, begins, src_size, slice_size, out_size, batch_size); } template void run_prepare_pics<uint16_t, double>( const uint16_t* pic, double* slices, const double* hanning_x, const double* hanning_y, const double* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size2_t out_size, size_t batch_size); template void run_prepare_pics<uint16_t, float>( const uint16_t* pic, float* slices, const float* hanning_x, const float* hanning_y, const float* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size2_t out_size, size_t batch_size); template void run_prepare_pics<double, double>( const double* pic, double* slices, const double* hanning_x, const double* hanning_y, const double* sums, const size2_t* begins, size2_t src_size, 
size2_t slice_size, size2_t out_size, size_t batch_size); }
cf7863dcc58ea6866ad049d868f96ce6f33af232.cu
#include "cuda.h" #include "cuda_runtime.h" #include "kernels.cuh" namespace emida { template<typename T> __global__ void prepare_pics(T* __restrict pic, const T* hanning_x, const T* hanning_y, const T* sums, size2_t size, size_t batch_size) { size_t whole_x = blockIdx.x * blockDim.x + threadIdx.x; size_t pic_x = whole_x % size.x; size_t pic_num = whole_x / size.x; size_t y = blockIdx.y * blockDim.y + threadIdx.y; //Problem? many threads may end up nothing if number of rows and block_size.y are in poor combination if (pic_num >= batch_size || y >= size.y) return; T& pixel = pic[pic_num * size.area() + y * size.x + pic_x]; //subtract mean of the picture pixel -= sums[pic_num] / size.area(); //apply hanning filter pixel *= hanning_x[pic_x] * hanning_y[y]; } template<typename T> void run_prepare_pics(T* pic, const T* hanning_x, const T* hanning_y, const T * sums, size2_t size, size_t batch_size) { dim3 block_size(16, 16); dim3 grid_size(div_up(size.x * batch_size, block_size.x), div_up(size.y, block_size.y)); prepare_pics<T> <<<grid_size, block_size>>> (pic, hanning_x, hanning_y, sums, size, batch_size); } template void run_prepare_pics<double>(double * pic, const double * hanning_x, const double* hanning_y, const double * sums, size2_t size, size_t batch_size); template<typename IN, typename OUT> __global__ void prepare_pics( const IN* __restrict__ pic, OUT* __restrict__ slices, const OUT* hanning_x, const OUT* hanning_y, const OUT* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size2_t out_size, size_t batch_size) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; size_t slice_tid = tid % out_size.area(); size_t slice_num = tid / out_size.area(); size2_t slice_pos = { slice_tid % out_size.x, slice_tid / out_size.x }; if (slice_num >= batch_size) return; size2_t pic_pos = begins[slice_num] + slice_pos; if (slice_pos.x >= slice_size.x || slice_pos.y >= slice_size.y) { slices[tid] = 0; return; } OUT pixel = pic[pic_pos.pos(src_size.x)]; //subtract mean of the picture pixel -= sums[slice_num] / slice_size.area(); //apply hanning filter and convert to OUT (float or double) pixel = (OUT)pixel * hanning_x[slice_pos.x] * hanning_y[slice_pos.y]; slices[tid] = pixel; } template<typename IN, typename OUT> void run_prepare_pics( const IN* pic, OUT* slices, const OUT* hanning_x, const OUT* hanning_y, const OUT* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size2_t out_size, size_t batch_size) { size_t block_size = 1024; size_t grid_size(div_up(out_size.area() * batch_size, block_size)); prepare_pics<<<grid_size, block_size >>> (pic, slices, hanning_x, hanning_y, sums, begins, src_size, slice_size, out_size, batch_size); } template void run_prepare_pics<uint16_t, double>( const uint16_t* pic, double* slices, const double* hanning_x, const double* hanning_y, const double* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size2_t out_size, size_t batch_size); template void run_prepare_pics<uint16_t, float>( const uint16_t* pic, float* slices, const float* hanning_x, const float* hanning_y, const float* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size2_t out_size, size_t batch_size); template void run_prepare_pics<double, double>( const double* pic, double* slices, const double* hanning_x, const double* hanning_y, const double* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size2_t out_size, size_t batch_size); }
24a79881cd346feac3b3abb4534a1a80cd3b2efe.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2015 Patrick Diehl
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <hip/hip_runtime.h>
#include <iostream>
#include <cmath>

#include "opencl/benchmark_vector/timer.hpp"
#include "config.hpp"
#include "utils.hpp"

//###########################################################################
//Kernels
//###########################################################################
template<typename T>
__global__ void kernel(size_t offset, T* in) {

	size_t i = offset + threadIdx.x + blockIdx.x * blockDim.x;
	T x = (T) i;
	T s = sinf(x);
	T c = cosf(x);
	in[i] = in[i] + sqrtf(s * s + c * c);
}

//###########################################################################
//Main
//###########################################################################
int main(int argc, char*argv[]) {

	if (argc != 2) {
		std::cout << "Usage: " << argv[0] << " #elements" << std::endl;
		exit(1);
	}

	timer_start();

	size_t count = atoi(argv[1]);
	double time = 0;

	const int blockSize = 256, nStreams = 4;
	const int n = pow(2,count) * 1024 * blockSize * nStreams;
	const int streamSize = n / nStreams;
	const int streamBytes = streamSize * sizeof(TYPE);
	const int bytes = n * sizeof(TYPE);

	std::cout << n << " ";

	//Pointer
	TYPE* in;
	TYPE* in_dev;

	//Malloc Host
	hipHostMalloc((void**) &in, bytes);
	memset(in, 0, bytes);

	//Malloc Device
	hipMalloc((void**) &in_dev, bytes);

	//Create streams
	hipStream_t stream[nStreams];
	for (int i = 0; i < nStreams; ++i)
		hipStreamCreate(&stream[i]);

	//Copy data to device
	for (int i = 0; i < nStreams; ++i) {
		int offset = i * streamSize;
		hipMemcpyAsync(&in_dev[offset], &in[offset], streamBytes,
				hipMemcpyHostToDevice, stream[i]);
	}

	//Launch kernels
	for (int i = 0; i < nStreams; ++i) {
		int offset = i * streamSize;
		hipLaunchKernelGGL(( kernel), dim3(streamSize / blockSize), dim3(blockSize), 0, stream[i],
				offset, in_dev);
	}

	//Copy the result back
	for (int i = 0; i < nStreams; ++i) {
		int offset = i * streamSize;
		hipMemcpyAsync(&in[offset], &in_dev[offset], streamBytes,
				hipMemcpyDeviceToHost, stream[i]);
	}

	hipDeviceSynchronize();

	time += timer_stop();

	//Check the result
	std::cout << checkKernel(in, n) << " ";

	timer_start();

	//Clean
	hipHostFree(in);
	hipFree(in_dev);

	for (int i = 0; i < nStreams; ++i)
		hipStreamDestroy(stream[i]);

	std:: cout << time + timer_stop() << std::endl;

	return EXIT_SUCCESS;
}
24a79881cd346feac3b3abb4534a1a80cd3b2efe.cu
// Copyright (c) 2015 Patrick Diehl
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <cuda.h>
#include <iostream>
#include <cmath>

#include "opencl/benchmark_vector/timer.hpp"
#include "config.hpp"
#include "utils.hpp"

//###########################################################################
//Kernels
//###########################################################################
template<typename T>
__global__ void kernel(size_t offset, T* in) {

	size_t i = offset + threadIdx.x + blockIdx.x * blockDim.x;
	T x = (T) i;
	T s = sinf(x);
	T c = cosf(x);
	in[i] = in[i] + sqrtf(s * s + c * c);
}

//###########################################################################
//Main
//###########################################################################
int main(int argc, char*argv[]) {

	if (argc != 2) {
		std::cout << "Usage: " << argv[0] << " #elements" << std::endl;
		exit(1);
	}

	timer_start();

	size_t count = atoi(argv[1]);
	double time = 0;

	const int blockSize = 256, nStreams = 4;
	const int n = pow(2,count) * 1024 * blockSize * nStreams;
	const int streamSize = n / nStreams;
	const int streamBytes = streamSize * sizeof(TYPE);
	const int bytes = n * sizeof(TYPE);

	std::cout << n << " ";

	//Pointer
	TYPE* in;
	TYPE* in_dev;

	//Malloc Host
	cudaMallocHost((void**) &in, bytes);
	memset(in, 0, bytes);

	//Malloc Device
	cudaMalloc((void**) &in_dev, bytes);

	//Create streams
	cudaStream_t stream[nStreams];
	for (int i = 0; i < nStreams; ++i)
		cudaStreamCreate(&stream[i]);

	//Copy data to device
	for (int i = 0; i < nStreams; ++i) {
		int offset = i * streamSize;
		cudaMemcpyAsync(&in_dev[offset], &in[offset], streamBytes,
				cudaMemcpyHostToDevice, stream[i]);
	}

	//Launch kernels
	for (int i = 0; i < nStreams; ++i) {
		int offset = i * streamSize;
		kernel<<<streamSize / blockSize, blockSize, 0, stream[i]>>>(offset, in_dev);
	}

	//Copy the result back
	for (int i = 0; i < nStreams; ++i) {
		int offset = i * streamSize;
		cudaMemcpyAsync(&in[offset], &in_dev[offset], streamBytes,
				cudaMemcpyDeviceToHost, stream[i]);
	}

	cudaDeviceSynchronize();

	time += timer_stop();

	//Check the result
	std::cout << checkKernel(in, n) << " ";

	timer_start();

	//Clean
	cudaFreeHost(in);
	cudaFree(in_dev);

	for (int i = 0; i < nStreams; ++i)
		cudaStreamDestroy(stream[i]);

	std:: cout << time + timer_stop() << std::endl;

	return EXIT_SUCCESS;
}
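In this pair the only non-mechanical change hipify makes is the launch syntax (kernel<<<grid, block, 0, stream>>> becomes hipLaunchKernelGGL) and the pinned-memory names (cudaMallocHost/cudaFreeHost become hipHostMalloc/hipHostFree); the stream, async-memcpy and synchronize calls map one-to-one. Since the buffer is zero-initialised and each element receives sqrtf(sin^2 x + cos^2 x), i.e. approximately 1, exactly once, the checkKernel helper from utils.hpp (not shown in this dump) presumably just verifies that every element is close to 1. A minimal sketch under that assumption, with a hypothetical signature:

// Hypothetical sketch of the checkKernel validation helper declared in utils.hpp.
// Assumption: it returns 1 when every result element is ~1.0 and 0 otherwise;
// the real helper may use a different signature or tolerance.
#include <cmath>
#include <cstddef>

template<typename T>
int checkKernel(const T* in, size_t n) {
	for (size_t i = 0; i < n; ++i) {
		// each element started at 0 and received sqrtf(s*s + c*c) == 1 exactly once
		if (std::fabs(static_cast<double>(in[i]) - 1.0) > 1e-3)
			return 0;
	}
	return 1;
}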
4f32eac9a917bf79364ba1d087bb85acb140deac.hip
// !!! This is a file automatically generated by hipify!!!
/*****************************************
Emitting C Generated Code
*******************************************/
#include <string.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include "cublas_header.h"
#include <stdbool.h>
/**************** Snippet ****************/
void Snippet(int x0) {
  float* x1 = (float*)malloc(2 * sizeof(float));
  int x2 = 0;
  while (x2 != 2) {
    x1[x2] = 1.0;
    x2 = x2 + 1;
  }
  float* x3 = (float*)malloc(4 * sizeof(float));
  int x4 = 0;
  while (x4 != 4) {
    x3[x4] = 2.0;
    x4 = x4 + 1;
  }
  float* x5 = (float*)malloc(0 * sizeof(float));
  CUDA_CALL(hipMalloc(&x5, (size_t)(4 * sizeof(float))));
  CUDA_CALL(hipMemcpy(x5, x3, (size_t)(4 * sizeof(float)), hipMemcpyHostToDevice));
  float* x6 = (float*)malloc(0 * sizeof(float));
  CUDA_CALL(hipMalloc(&x6, (size_t)(2 * sizeof(float))));
  CUDA_CALL(hipMemcpy(x6, x1, (size_t)(2 * sizeof(float)), hipMemcpyHostToDevice));
  float* x7 = (float*)malloc(0 * sizeof(float));
  CUDA_CALL(hipMalloc(&x7, (size_t)(2 * sizeof(float))));
  CUDA_CALL(hipMemcpy(x7, x1, (size_t)(2 * sizeof(float)), hipMemcpyHostToDevice));
  float* x8 = (float*)malloc(2 * sizeof(float));
  hipblasHandle_t x9;
  CUBLAS_CALL(hipblasCreate(&x9));
  float x10 = 1.0;
  float x11 = 1.0;
  CUBLAS_CALL(hipblasSgemv(x9, HIPBLAS_OP_N, 2, 2, &x10, x5, 2, x6, 1, &x11, x7, 1));
  CUDA_CALL(hipMemcpy(x8, x7, (size_t)(2 * sizeof(float)), hipMemcpyDeviceToHost));
  printf("Test GEMV:\n");
  int x12 = 0;
  while (x12 != 2) {
    printf("%f, ", x8[x12]);
    x12 = x12 + 1;
  }
  CUDA_CALL(hipFree(x5));
  CUDA_CALL(hipFree(x6));
  CUDA_CALL(hipFree(x7));
  CUBLAS_CALL(hipblasDestroy(x9));
}
/*****************************************
End of C Generated Code
*******************************************/
int main(int argc, char *argv[]) {
  if (argc != 2) {
    printf("usage: %s <arg>\n", argv[0]);
    return 0;
  }
  Snippet(atoi(argv[1]));
  return 0;
}
4f32eac9a917bf79364ba1d087bb85acb140deac.cu
/*****************************************
Emitting C Generated Code
*******************************************/
#include <string.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include "cublas_header.h"
#include <stdbool.h>
/**************** Snippet ****************/
void Snippet(int x0) {
  float* x1 = (float*)malloc(2 * sizeof(float));
  int x2 = 0;
  while (x2 != 2) {
    x1[x2] = 1.0;
    x2 = x2 + 1;
  }
  float* x3 = (float*)malloc(4 * sizeof(float));
  int x4 = 0;
  while (x4 != 4) {
    x3[x4] = 2.0;
    x4 = x4 + 1;
  }
  float* x5 = (float*)malloc(0 * sizeof(float));
  CUDA_CALL(cudaMalloc(&x5, (size_t)(4 * sizeof(float))));
  CUDA_CALL(cudaMemcpy(x5, x3, (size_t)(4 * sizeof(float)), cudaMemcpyHostToDevice));
  float* x6 = (float*)malloc(0 * sizeof(float));
  CUDA_CALL(cudaMalloc(&x6, (size_t)(2 * sizeof(float))));
  CUDA_CALL(cudaMemcpy(x6, x1, (size_t)(2 * sizeof(float)), cudaMemcpyHostToDevice));
  float* x7 = (float*)malloc(0 * sizeof(float));
  CUDA_CALL(cudaMalloc(&x7, (size_t)(2 * sizeof(float))));
  CUDA_CALL(cudaMemcpy(x7, x1, (size_t)(2 * sizeof(float)), cudaMemcpyHostToDevice));
  float* x8 = (float*)malloc(2 * sizeof(float));
  cublasHandle_t x9;
  CUBLAS_CALL(cublasCreate(&x9));
  float x10 = 1.0;
  float x11 = 1.0;
  CUBLAS_CALL(cublasSgemv(x9, CUBLAS_OP_N, 2, 2, &x10, x5, 2, x6, 1, &x11, x7, 1));
  CUDA_CALL(cudaMemcpy(x8, x7, (size_t)(2 * sizeof(float)), cudaMemcpyDeviceToHost));
  printf("Test GEMV:\n");
  int x12 = 0;
  while (x12 != 2) {
    printf("%f, ", x8[x12]);
    x12 = x12 + 1;
  }
  CUDA_CALL(cudaFree(x5));
  CUDA_CALL(cudaFree(x6));
  CUDA_CALL(cudaFree(x7));
  CUBLAS_CALL(cublasDestroy(x9));
}
/*****************************************
End of C Generated Code
*******************************************/
int main(int argc, char *argv[]) {
  if (argc != 2) {
    printf("usage: %s <arg>\n", argv[0]);
    return 0;
  }
  Snippet(atoi(argv[1]));
  return 0;
}
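Both versions of this generated snippet run the same test: a 2x2 column-major matrix filled with 2.0 (x5), an input vector of ones (x6), an output vector initialised to ones (x7), and alpha = beta = 1, so cublasSgemv / hipblasSgemv should leave 2*1 + 2*1 + 1 = 5.0 in each output element. A small host-only reference of that arithmetic; nothing below comes from the generated code beyond the values it sets up:

// Host-side reference for the GEMV test above: y = alpha*A*x + beta*y with a
// 2x2 column-major A of 2.0f, x of 1.0f, y of 1.0f and alpha = beta = 1.0f,
// so every output element should print as 5.000000.
#include <stdio.h>

int main(void) {
  const int n = 2;
  float A[4] = {2.0f, 2.0f, 2.0f, 2.0f};    // column-major, lda = 2
  float x[2] = {1.0f, 1.0f};
  float y[2] = {1.0f, 1.0f};
  for (int row = 0; row < n; ++row) {
    float acc = y[row];                     // beta * y with beta == 1
    for (int col = 0; col < n; ++col)
      acc += A[col * n + row] * x[col];     // alpha * A * x with alpha == 1
    printf("%f, ", acc);
  }
  return 0;
}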
f2e5b0609cdc2897387ce6f0eb952777201677db.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TOR (INCLUDING *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Execution environment */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cublas_helpers.h" #include "sparse_gemm_operation_profiler.h" #include "gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor SparseGemmOperationProfiler::SparseGemmOperationProfiler(Options const& options) : OperationProfiler(options, library::OperationKind::kSparseGemm, { {ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (e.g. gemm, planar " "complex, batched, ...)"}, {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"}, {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"}, {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"}, {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"}, {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"}, {ArgumentTypeID::kTensor, {"E"}, "Tensor storing the E operand"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kInteger, {"split_k_slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kInteger, {"batch_count"}, "Number of GEMMs computed in one batch"}, }) { description_ = " Structured sparse GEMM. 
D = alpha * A*B + beta * C"; } /// Destructor SparseGemmOperationProfiler::~SparseGemmOperationProfiler() {} /// Prints usage statement for the math function void SparseGemmOperationProfiler::print_usage(std::ostream& out) const { out << "Sparse GEMM" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void SparseGemmOperationProfiler::print_examples(std::ostream& out) const { out << "\nExamples:\n\n" << "Profile a particular problem size:\n" << " $ cutlass_profiler --operation=SparseGemm --m=1024 --n=1024 " "--k=128\n\n" << "Schmoo over problem size and beta:\n" << " $ cutlass_profiler --operation=SparseGemm --m=1024:4096:256 " "--n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n" << "Schmoo over accumulator types:\n" << " $ cutlass_profiler --operation=SparseGemm " "--accumulator-type=f16,f32\n\n" << "Run when A is f16 with column-major and B is any datatype with " "row-major (For column major, use column, col, or n. For row major " "use, row or t):\n" << " $ cutlass_profiler --operation=SparseGemm --A=f16:column " "--B=*:row\n\n" << "Using various input value distribution:\n" << " $ cutlass_profiler --operation=SparseGemm " "--dist=uniform,min:0,max:3\n" << " $ cutlass_profiler --operation=SparseGemm " "--dist=gaussian,mean:0,stddev:3\n" << " $ cutlass_profiler --operation=SparseGemm " "--dist=sequential,start:0,delta:1\n\n" << "Run a kernel with cta tile size of 256x128x32 and save workspace " "if results are incorrect (note that --cta-tile::k=32 is default " "cta-tile size):\n" << " $ cutlass_profiler --operation=SparseGemm --cta_m=256 --cta_n=128 " " --cta_k=32 --save-workspace=incorrect\n\n" << "Test your changes to gemm kernels with a quick functional test and " "save results in functional-test.csv:\n" << " $ cutlass_profiler --operation=SparseGemm \\ \n" << " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" << " --beta=0,1,2 --profiling-iterations=1 \\ \n" << " --providers=cutlass --output=functional-test.csv\n\n"; } ///////////////////////////////////////////////////////////////////////////////////////////////// Status SparseGemmOperationProfiler::SparseGemmProblem::parse( library::SparseGemmDescription const& operation_desc, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { if (!arg_as_int(this->m, "m", problem_space, problem)) { // default value this->m = 1024; } if (!arg_as_int(this->n, "n", problem_space, problem)) { // default value this->n = 1024; } if (!arg_as_int(this->k, "k", problem_space, problem)) { // default value this->k = 1024; } if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { // default value this->split_k_slices = 1; } if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { // default value this->batch_count = 1; } if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.E, "E", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar(this->alpha, operation_desc.element_epilogue, "alpha", problem_space, problem)) { if 
(!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar(this->beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } this->elements_per_128b = 128 / library::sizeof_bits(operation_desc.A.element); this->lda = DeviceAllocation::get_packed_layout( operation_desc.A.layout, {int(this->m), int(this->k) / int(this->sparse)}) .front(); this->ldb = DeviceAllocation::get_packed_layout( operation_desc.B.layout, {int(this->k), int(this->n)}) .front(); this->ldc = DeviceAllocation::get_packed_layout( operation_desc.C.layout, {int(this->m), int(this->n)}) .front(); this->lde = DeviceAllocation::get_packed_layout( operation_desc.E.layout, {int(this->m), int(this->k / this->sparse / this->elements_per_128b)}) .front(); return Status::kSuccess; } /// Initializes a performance result void SparseGemmOperationProfiler::SparseGemmProblem::initialize_result( PerformanceResult& result, library::SparseGemmDescription const& operation_desc, ProblemSpace const& problem_space) { result.arguments.resize(problem_space.rank()); set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind)); set_argument(result, "A", problem_space, std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); set_argument(result, "B", problem_space, std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout)); set_argument(result, "C", problem_space, std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout)); set_argument(result, "E", problem_space, std::string(library::to_string(operation_desc.E.element)) + ":" + library::to_string(operation_desc.E.layout)); set_argument(result, "m", problem_space, m); set_argument(result, "n", problem_space, n); set_argument(result, "k", problem_space, k); set_argument(result, "split_k_slices", problem_space, split_k_slices); set_argument(result, "batch_count", problem_space, batch_count); set_argument(result, "alpha", problem_space, library::lexical_cast(alpha, operation_desc.element_epilogue)); set_argument(result, "beta", problem_space, library::lexical_cast(beta, operation_desc.element_epilogue)); } /// Extracts the problem dimensions Status SparseGemmOperationProfiler::initialize_configuration( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { library::SparseGemmDescription const& operation_desc = static_cast<library::SparseGemmDescription const&>( operation->description()); if (operation_desc.gemm_kind != library::GemmKind::kSparse) { return Status::kErrorInvalidProblem; } Status status = problem_.parse(operation_desc, problem_space, problem); if (status != Status::kSuccess) { return status; } gemm_workspace_.configuration.problem_size.m() = int(problem_.m); gemm_workspace_.configuration.problem_size.n() = int(problem_.n); gemm_workspace_.configuration.problem_size.k() = int(problem_.k); gemm_workspace_.configuration.lda = problem_.lda; gemm_workspace_.configuration.ldb = problem_.ldb; gemm_workspace_.configuration.ldc = problem_.ldc; gemm_workspace_.configuration.ldd = problem_.ldc; gemm_workspace_.configuration.lde = problem_.lde; gemm_workspace_.arguments.A = nullptr; 
gemm_workspace_.arguments.B = nullptr; gemm_workspace_.arguments.C = nullptr; gemm_workspace_.arguments.D = nullptr; gemm_workspace_.arguments.E = nullptr; gemm_workspace_.arguments.alpha = problem_.alpha.data(); gemm_workspace_.arguments.beta = problem_.beta.data(); gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments); } /// Initializes the performance result void SparseGemmOperationProfiler::initialize_result_( PerformanceResult& result, Options const& options, library::SparseGemmDescription const& operation_desc, ProblemSpace const& problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = Status::kSuccess; result.operation_name = operation_desc.name; problem_.initialize_result(result, operation_desc, problem_space); OperationProfiler::initialize_result_(result, operation_desc, problem_space); // Input bytes read and Output bytes written for the gemm problem result.bytes = int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) * problem_.k / problem_.sparse + int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.n / 8) * problem_.k + int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n + int64_t(library::sizeof_bits(operation_desc.E.element) * problem_.m / 8) * problem_.k / problem_.sparse / problem_.elements_per_128b; // Set is_beta_zero true if beta is zero bool is_beta_zero = std::all_of(problem_.beta.begin(), problem_.beta.end(), [](uint8_t i) { return i == 0; }); // Output bytes read for the gemm problem for non-zero beta values if (!is_beta_zero) { result.bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n; } result.flops = 2 * (problem_.m * problem_.n * problem_.k + problem_.m * problem_.n); result.runtime = 0; } /// Initializes workspace Status SparseGemmOperationProfiler::initialize_workspace( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { library::SparseGemmDescription const& operation_desc = static_cast<library::SparseGemmDescription const&>( operation->description()); if (options.execution_mode != ExecutionMode::kDryRun) { gemm_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, {int(problem_.m), int(problem_.k) / int(problem_.sparse)}, {int(problem_.lda)}); gemm_workspace_.B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, {int(problem_.k), int(problem_.n)}, {int(problem_.ldb)}); gemm_workspace_.C = device_context.allocate_tensor( options, "C", operation_desc.C.element, operation_desc.C.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)}); gemm_workspace_.Computed = device_context.allocate_tensor( "D", operation_desc.C.element, operation_desc.C.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)}); gemm_workspace_.E = device_context.allocate_sparsemeta_tensor( options, "E", operation_desc.E.element, operation_desc.E.layout, operation_desc.A.element, {int(problem_.m), int(problem_.k) / int(problem_.sparse) / int(problem_.elements_per_128b)}, {int(problem_.lde)}); gemm_workspace_.Reference = device_context.allocate_tensor( "Reference", 
operation_desc.C.element, operation_desc.C.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)}); gemm_workspace_.Reference->copy_from_device(gemm_workspace_.C->data()); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = operation->get_host_workspace_size( &gemm_workspace_.configuration); gemm_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = operation->get_device_workspace_size( &gemm_workspace_.configuration); gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = operation->initialize( &gemm_workspace_.configuration, gemm_workspace_.host_workspace.data(), gemm_workspace_.device_workspace.data()); } // // If CUTLASS is enabled, generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kSparseGemm; results_.back().disposition = Disposition::kNotRun; for (auto& verification_provider : options.verification.providers) { results_.back().verification_map[verification_provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool SparseGemmOperationProfiler::verify_cutlass( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } // Initialize structure containing GEMM arguments gemm_workspace_.arguments.A = gemm_workspace_.A->data(); gemm_workspace_.arguments.B = gemm_workspace_.B->data(); gemm_workspace_.arguments.C = gemm_workspace_.C->data(); gemm_workspace_.arguments.D = gemm_workspace_.Computed->data(); gemm_workspace_.arguments.E = gemm_workspace_.E->data(); gemm_workspace_.arguments.alpha = problem_.alpha.data(); gemm_workspace_.arguments.beta = problem_.beta.data(); gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Run the CUTLASS operation // results_.back().status = operation->run( &gemm_workspace_.arguments, gemm_workspace_.host_workspace.data(), gemm_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } hipError_t result = hipDeviceSynchronize(); if (result != hipSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // CUTLASS op ran the but not yet verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for (auto& m : results_.back().verification_map) { if (m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if (!is_any_verification_run_passed && m.second == Disposition::kPassed) { is_any_verification_run_passed = true; } } if (is_any_verification_run_passed) { results_.back().disposition = 
Disposition::kPassed; } } // Return true means continue profiling return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Measures performance results bool SparseGemmOperationProfiler::profile( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { // Initialize structure containing GEMM arguments gemm_workspace_.arguments.A = gemm_workspace_.A->data(); gemm_workspace_.arguments.B = gemm_workspace_.B->data(); gemm_workspace_.arguments.C = gemm_workspace_.C->data(); gemm_workspace_.arguments.D = gemm_workspace_.Computed->data(); gemm_workspace_.arguments.E = gemm_workspace_.E->data(); gemm_workspace_.arguments.alpha = problem_.alpha.data(); gemm_workspace_.arguments.beta = problem_.beta.data(); gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; results_.back().status = profile_cutlass_(results_.back().runtime, options, operation, &gemm_workspace_.arguments, gemm_workspace_.host_workspace.data(), gemm_workspace_.device_workspace.data()); } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
f2e5b0609cdc2897387ce6f0eb952777201677db.cu
/*************************************************************************************************** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TOR (INCLUDING *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Execution environment */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cublas_helpers.h" #include "sparse_gemm_operation_profiler.h" #include "gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor SparseGemmOperationProfiler::SparseGemmOperationProfiler(Options const& options) : OperationProfiler(options, library::OperationKind::kSparseGemm, { {ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (e.g. gemm, planar " "complex, batched, ...)"}, {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"}, {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"}, {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"}, {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"}, {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"}, {ArgumentTypeID::kTensor, {"E"}, "Tensor storing the E operand"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kInteger, {"split_k_slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kInteger, {"batch_count"}, "Number of GEMMs computed in one batch"}, }) { description_ = " Structured sparse GEMM. 
D = alpha * A*B + beta * C"; } /// Destructor SparseGemmOperationProfiler::~SparseGemmOperationProfiler() {} /// Prints usage statement for the math function void SparseGemmOperationProfiler::print_usage(std::ostream& out) const { out << "Sparse GEMM" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void SparseGemmOperationProfiler::print_examples(std::ostream& out) const { out << "\nExamples:\n\n" << "Profile a particular problem size:\n" << " $ cutlass_profiler --operation=SparseGemm --m=1024 --n=1024 " "--k=128\n\n" << "Schmoo over problem size and beta:\n" << " $ cutlass_profiler --operation=SparseGemm --m=1024:4096:256 " "--n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n" << "Schmoo over accumulator types:\n" << " $ cutlass_profiler --operation=SparseGemm " "--accumulator-type=f16,f32\n\n" << "Run when A is f16 with column-major and B is any datatype with " "row-major (For column major, use column, col, or n. For row major " "use, row or t):\n" << " $ cutlass_profiler --operation=SparseGemm --A=f16:column " "--B=*:row\n\n" << "Using various input value distribution:\n" << " $ cutlass_profiler --operation=SparseGemm " "--dist=uniform,min:0,max:3\n" << " $ cutlass_profiler --operation=SparseGemm " "--dist=gaussian,mean:0,stddev:3\n" << " $ cutlass_profiler --operation=SparseGemm " "--dist=sequential,start:0,delta:1\n\n" << "Run a kernel with cta tile size of 256x128x32 and save workspace " "if results are incorrect (note that --cta-tile::k=32 is default " "cta-tile size):\n" << " $ cutlass_profiler --operation=SparseGemm --cta_m=256 --cta_n=128 " " --cta_k=32 --save-workspace=incorrect\n\n" << "Test your changes to gemm kernels with a quick functional test and " "save results in functional-test.csv:\n" << " $ cutlass_profiler --operation=SparseGemm \\ \n" << " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" << " --beta=0,1,2 --profiling-iterations=1 \\ \n" << " --providers=cutlass --output=functional-test.csv\n\n"; } ///////////////////////////////////////////////////////////////////////////////////////////////// Status SparseGemmOperationProfiler::SparseGemmProblem::parse( library::SparseGemmDescription const& operation_desc, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { if (!arg_as_int(this->m, "m", problem_space, problem)) { // default value this->m = 1024; } if (!arg_as_int(this->n, "n", problem_space, problem)) { // default value this->n = 1024; } if (!arg_as_int(this->k, "k", problem_space, problem)) { // default value this->k = 1024; } if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { // default value this->split_k_slices = 1; } if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { // default value this->batch_count = 1; } if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.E, "E", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar(this->alpha, operation_desc.element_epilogue, "alpha", problem_space, problem)) { if 
(!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar(this->beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } this->elements_per_128b = 128 / library::sizeof_bits(operation_desc.A.element); this->lda = DeviceAllocation::get_packed_layout( operation_desc.A.layout, {int(this->m), int(this->k) / int(this->sparse)}) .front(); this->ldb = DeviceAllocation::get_packed_layout( operation_desc.B.layout, {int(this->k), int(this->n)}) .front(); this->ldc = DeviceAllocation::get_packed_layout( operation_desc.C.layout, {int(this->m), int(this->n)}) .front(); this->lde = DeviceAllocation::get_packed_layout( operation_desc.E.layout, {int(this->m), int(this->k / this->sparse / this->elements_per_128b)}) .front(); return Status::kSuccess; } /// Initializes a performance result void SparseGemmOperationProfiler::SparseGemmProblem::initialize_result( PerformanceResult& result, library::SparseGemmDescription const& operation_desc, ProblemSpace const& problem_space) { result.arguments.resize(problem_space.rank()); set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind)); set_argument(result, "A", problem_space, std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); set_argument(result, "B", problem_space, std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout)); set_argument(result, "C", problem_space, std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout)); set_argument(result, "E", problem_space, std::string(library::to_string(operation_desc.E.element)) + ":" + library::to_string(operation_desc.E.layout)); set_argument(result, "m", problem_space, m); set_argument(result, "n", problem_space, n); set_argument(result, "k", problem_space, k); set_argument(result, "split_k_slices", problem_space, split_k_slices); set_argument(result, "batch_count", problem_space, batch_count); set_argument(result, "alpha", problem_space, library::lexical_cast(alpha, operation_desc.element_epilogue)); set_argument(result, "beta", problem_space, library::lexical_cast(beta, operation_desc.element_epilogue)); } /// Extracts the problem dimensions Status SparseGemmOperationProfiler::initialize_configuration( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { library::SparseGemmDescription const& operation_desc = static_cast<library::SparseGemmDescription const&>( operation->description()); if (operation_desc.gemm_kind != library::GemmKind::kSparse) { return Status::kErrorInvalidProblem; } Status status = problem_.parse(operation_desc, problem_space, problem); if (status != Status::kSuccess) { return status; } gemm_workspace_.configuration.problem_size.m() = int(problem_.m); gemm_workspace_.configuration.problem_size.n() = int(problem_.n); gemm_workspace_.configuration.problem_size.k() = int(problem_.k); gemm_workspace_.configuration.lda = problem_.lda; gemm_workspace_.configuration.ldb = problem_.ldb; gemm_workspace_.configuration.ldc = problem_.ldc; gemm_workspace_.configuration.ldd = problem_.ldc; gemm_workspace_.configuration.lde = problem_.lde; gemm_workspace_.arguments.A = nullptr; 
gemm_workspace_.arguments.B = nullptr; gemm_workspace_.arguments.C = nullptr; gemm_workspace_.arguments.D = nullptr; gemm_workspace_.arguments.E = nullptr; gemm_workspace_.arguments.alpha = problem_.alpha.data(); gemm_workspace_.arguments.beta = problem_.beta.data(); gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments); } /// Initializes the performance result void SparseGemmOperationProfiler::initialize_result_( PerformanceResult& result, Options const& options, library::SparseGemmDescription const& operation_desc, ProblemSpace const& problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = Status::kSuccess; result.operation_name = operation_desc.name; problem_.initialize_result(result, operation_desc, problem_space); OperationProfiler::initialize_result_(result, operation_desc, problem_space); // Input bytes read and Output bytes written for the gemm problem result.bytes = int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) * problem_.k / problem_.sparse + int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.n / 8) * problem_.k + int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n + int64_t(library::sizeof_bits(operation_desc.E.element) * problem_.m / 8) * problem_.k / problem_.sparse / problem_.elements_per_128b; // Set is_beta_zero true if beta is zero bool is_beta_zero = std::all_of(problem_.beta.begin(), problem_.beta.end(), [](uint8_t i) { return i == 0; }); // Output bytes read for the gemm problem for non-zero beta values if (!is_beta_zero) { result.bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n; } result.flops = 2 * (problem_.m * problem_.n * problem_.k + problem_.m * problem_.n); result.runtime = 0; } /// Initializes workspace Status SparseGemmOperationProfiler::initialize_workspace( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { library::SparseGemmDescription const& operation_desc = static_cast<library::SparseGemmDescription const&>( operation->description()); if (options.execution_mode != ExecutionMode::kDryRun) { gemm_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, {int(problem_.m), int(problem_.k) / int(problem_.sparse)}, {int(problem_.lda)}); gemm_workspace_.B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, {int(problem_.k), int(problem_.n)}, {int(problem_.ldb)}); gemm_workspace_.C = device_context.allocate_tensor( options, "C", operation_desc.C.element, operation_desc.C.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)}); gemm_workspace_.Computed = device_context.allocate_tensor( "D", operation_desc.C.element, operation_desc.C.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)}); gemm_workspace_.E = device_context.allocate_sparsemeta_tensor( options, "E", operation_desc.E.element, operation_desc.E.layout, operation_desc.A.element, {int(problem_.m), int(problem_.k) / int(problem_.sparse) / int(problem_.elements_per_128b)}, {int(problem_.lde)}); gemm_workspace_.Reference = device_context.allocate_tensor( "Reference", 
operation_desc.C.element, operation_desc.C.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)}); gemm_workspace_.Reference->copy_from_device(gemm_workspace_.C->data()); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = operation->get_host_workspace_size( &gemm_workspace_.configuration); gemm_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = operation->get_device_workspace_size( &gemm_workspace_.configuration); gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = operation->initialize( &gemm_workspace_.configuration, gemm_workspace_.host_workspace.data(), gemm_workspace_.device_workspace.data()); } // // If CUTLASS is enabled, generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kSparseGemm; results_.back().disposition = Disposition::kNotRun; for (auto& verification_provider : options.verification.providers) { results_.back().verification_map[verification_provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool SparseGemmOperationProfiler::verify_cutlass( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } // Initialize structure containing GEMM arguments gemm_workspace_.arguments.A = gemm_workspace_.A->data(); gemm_workspace_.arguments.B = gemm_workspace_.B->data(); gemm_workspace_.arguments.C = gemm_workspace_.C->data(); gemm_workspace_.arguments.D = gemm_workspace_.Computed->data(); gemm_workspace_.arguments.E = gemm_workspace_.E->data(); gemm_workspace_.arguments.alpha = problem_.alpha.data(); gemm_workspace_.arguments.beta = problem_.beta.data(); gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Run the CUTLASS operation // results_.back().status = operation->run( &gemm_workspace_.arguments, gemm_workspace_.host_workspace.data(), gemm_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // CUTLASS op ran the but not yet verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for (auto& m : results_.back().verification_map) { if (m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if (!is_any_verification_run_passed && m.second == Disposition::kPassed) { is_any_verification_run_passed = true; } } if (is_any_verification_run_passed) { results_.back().disposition = 
Disposition::kPassed; } } // Return true means continue profiling return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Measures performance results bool SparseGemmOperationProfiler::profile( Options const& options, PerformanceReport& report, DeviceContext& device_context, library::Operation const* operation, ProblemSpace const& problem_space, ProblemSpace::Problem const& problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { // Initialize structure containing GEMM arguments gemm_workspace_.arguments.A = gemm_workspace_.A->data(); gemm_workspace_.arguments.B = gemm_workspace_.B->data(); gemm_workspace_.arguments.C = gemm_workspace_.C->data(); gemm_workspace_.arguments.D = gemm_workspace_.Computed->data(); gemm_workspace_.arguments.E = gemm_workspace_.E->data(); gemm_workspace_.arguments.alpha = problem_.alpha.data(); gemm_workspace_.arguments.beta = problem_.beta.data(); gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; results_.back().status = profile_cutlass_(results_.back().runtime, options, operation, &gemm_workspace_.arguments, gemm_workspace_.host_workspace.data(), gemm_workspace_.device_workspace.data()); } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
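The profiler pair above sizes its performance model from the problem shape alone: the compressed A operand carries k/sparse columns, the E metadata adds k/(sparse*elements_per_128b) elements per row, C is counted a second time only when beta is non-zero, and the FLOP estimate is 2*(m*n*k + m*n). A small standalone illustration of those formulas follows; the concrete values (50% structured sparsity, 16-bit A/B/E, 32-bit C, m = n = 1024, k = 128) are assumptions chosen only to make the arithmetic runnable, not taken from any particular CUTLASS kernel.

// Worked example of the traffic/FLOP model in initialize_result_ above,
// under the assumed element widths and problem size stated in the note.
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int64_t m = 1024, n = 1024, k = 128;
  const int64_t sparse = 2, elements_per_128b = 128 / 16;
  const int64_t a_bits = 16, b_bits = 16, c_bits = 32, e_bits = 16;

  int64_t bytes = (a_bits * m / 8) * k / sparse                       // compressed A read
                + (b_bits * n / 8) * k                                // B read
                + (c_bits * m / 8) * n                                // D written
                + (e_bits * m / 8) * k / sparse / elements_per_128b;  // E metadata read
  bytes += (c_bits * m / 8) * n;                                      // C read when beta != 0

  int64_t flops = 2 * (m * n * k + m * n);
  printf("bytes = %lld, flops = %lld\n", (long long)bytes, (long long)flops);
  return 0;
}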
94a2be252ed76e1d6e8deec7a813802407353674.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_PdV_kernel_predict; int xdim0_PdV_kernel_predict_h = -1; __constant__ int ydim0_PdV_kernel_predict; int ydim0_PdV_kernel_predict_h = -1; __constant__ int xdim1_PdV_kernel_predict; int xdim1_PdV_kernel_predict_h = -1; __constant__ int ydim1_PdV_kernel_predict; int ydim1_PdV_kernel_predict_h = -1; __constant__ int xdim2_PdV_kernel_predict; int xdim2_PdV_kernel_predict_h = -1; __constant__ int ydim2_PdV_kernel_predict; int ydim2_PdV_kernel_predict_h = -1; __constant__ int xdim3_PdV_kernel_predict; int xdim3_PdV_kernel_predict_h = -1; __constant__ int ydim3_PdV_kernel_predict; int ydim3_PdV_kernel_predict_h = -1; __constant__ int xdim4_PdV_kernel_predict; int xdim4_PdV_kernel_predict_h = -1; __constant__ int ydim4_PdV_kernel_predict; int ydim4_PdV_kernel_predict_h = -1; __constant__ int xdim5_PdV_kernel_predict; int xdim5_PdV_kernel_predict_h = -1; __constant__ int ydim5_PdV_kernel_predict; int ydim5_PdV_kernel_predict_h = -1; __constant__ int xdim6_PdV_kernel_predict; int xdim6_PdV_kernel_predict_h = -1; __constant__ int ydim6_PdV_kernel_predict; int ydim6_PdV_kernel_predict_h = -1; __constant__ int xdim7_PdV_kernel_predict; int xdim7_PdV_kernel_predict_h = -1; __constant__ int ydim7_PdV_kernel_predict; int ydim7_PdV_kernel_predict_h = -1; __constant__ int xdim8_PdV_kernel_predict; int xdim8_PdV_kernel_predict_h = -1; __constant__ int ydim8_PdV_kernel_predict; int ydim8_PdV_kernel_predict_h = -1; __constant__ int xdim9_PdV_kernel_predict; int xdim9_PdV_kernel_predict_h = -1; __constant__ int ydim9_PdV_kernel_predict; int ydim9_PdV_kernel_predict_h = -1; __constant__ int xdim10_PdV_kernel_predict; int xdim10_PdV_kernel_predict_h = -1; __constant__ int ydim10_PdV_kernel_predict; int ydim10_PdV_kernel_predict_h = -1; __constant__ int xdim11_PdV_kernel_predict; int xdim11_PdV_kernel_predict_h = -1; __constant__ int ydim11_PdV_kernel_predict; int ydim11_PdV_kernel_predict_h = -1; __constant__ int xdim12_PdV_kernel_predict; int xdim12_PdV_kernel_predict_h = -1; __constant__ int ydim12_PdV_kernel_predict; int ydim12_PdV_kernel_predict_h = -1; __constant__ int xdim13_PdV_kernel_predict; int xdim13_PdV_kernel_predict_h = -1; __constant__ int ydim13_PdV_kernel_predict; int ydim13_PdV_kernel_predict_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_PdV_kernel_predict*(y)+xdim0_PdV_kernel_predict*ydim0_PdV_kernel_predict*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_PdV_kernel_predict*(y)+xdim1_PdV_kernel_predict*ydim1_PdV_kernel_predict*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_PdV_kernel_predict*(y)+xdim2_PdV_kernel_predict*ydim2_PdV_kernel_predict*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_PdV_kernel_predict*(y)+xdim3_PdV_kernel_predict*ydim3_PdV_kernel_predict*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_PdV_kernel_predict*(y)+xdim4_PdV_kernel_predict*ydim4_PdV_kernel_predict*(z)) #define OPS_ACC5(x,y,z) (x+xdim5_PdV_kernel_predict*(y)+xdim5_PdV_kernel_predict*ydim5_PdV_kernel_predict*(z)) #define OPS_ACC6(x,y,z) (x+xdim6_PdV_kernel_predict*(y)+xdim6_PdV_kernel_predict*ydim6_PdV_kernel_predict*(z)) #define OPS_ACC7(x,y,z) (x+xdim7_PdV_kernel_predict*(y)+xdim7_PdV_kernel_predict*ydim7_PdV_kernel_predict*(z)) #define OPS_ACC8(x,y,z) (x+xdim8_PdV_kernel_predict*(y)+xdim8_PdV_kernel_predict*ydim8_PdV_kernel_predict*(z)) #define OPS_ACC9(x,y,z) (x+xdim9_PdV_kernel_predict*(y)+xdim9_PdV_kernel_predict*ydim9_PdV_kernel_predict*(z)) #define OPS_ACC10(x,y,z) 
(x+xdim10_PdV_kernel_predict*(y)+xdim10_PdV_kernel_predict*ydim10_PdV_kernel_predict*(z)) #define OPS_ACC11(x,y,z) (x+xdim11_PdV_kernel_predict*(y)+xdim11_PdV_kernel_predict*ydim11_PdV_kernel_predict*(z)) #define OPS_ACC12(x,y,z) (x+xdim12_PdV_kernel_predict*(y)+xdim12_PdV_kernel_predict*ydim12_PdV_kernel_predict*(z)) #define OPS_ACC13(x,y,z) (x+xdim13_PdV_kernel_predict*(y)+xdim13_PdV_kernel_predict*ydim13_PdV_kernel_predict*(z)) //user function __device__ void PdV_kernel_predict(const double *xarea, const double *xvel0, const double *yarea, const double *yvel0, double *volume_change, const double *volume, const double *pressure, const double *density0, double *density1, const double *viscosity, const double *energy0, double *energy1, const double *zarea, const double *zvel0) { double recip_volume, energy_change, min_cell_volume; double right_flux, left_flux, top_flux, bottom_flux, back_flux, front_flux, total_flux; left_flux = ( xarea[OPS_ACC0(0,0,0)] * ( xvel0[OPS_ACC1(0,0,0)] + xvel0[OPS_ACC1(0,1,0)] + xvel0[OPS_ACC1(0,0,1)] + xvel0[OPS_ACC1(0,1,1)] + xvel0[OPS_ACC1(0,0,0)] + xvel0[OPS_ACC1(0,1,0)] + xvel0[OPS_ACC1(0,0,1)] + xvel0[OPS_ACC1(0,1,1)] ) ) * 0.125 * dt * 0.5; right_flux = ( xarea[OPS_ACC0(1,0,0)] * ( xvel0[OPS_ACC1(1,0,0)] + xvel0[OPS_ACC1(1,1,0)] + xvel0[OPS_ACC1(1,0,1)] + xvel0[OPS_ACC1(1,1,1)] + xvel0[OPS_ACC1(1,0,0)] + xvel0[OPS_ACC1(1,1,0)] + xvel0[OPS_ACC1(1,0,1)] + xvel0[OPS_ACC1(1,1,1)] ) ) * 0.125 * dt * 0.5; bottom_flux = ( yarea[OPS_ACC2(0,0,0)] * ( yvel0[OPS_ACC3(0,0,0)] + yvel0[OPS_ACC3(1,0,0)] + yvel0[OPS_ACC3(0,0,1)] + yvel0[OPS_ACC3(1,0,1)] + yvel0[OPS_ACC3(0,0,0)] + yvel0[OPS_ACC3(1,0,0)] + yvel0[OPS_ACC3(0,0,1)] + yvel0[OPS_ACC3(1,0,1)] ) ) * 0.125* dt * 0.5; top_flux = ( yarea[OPS_ACC2(0,1,0)] * ( yvel0[OPS_ACC3(0,1,0)] + yvel0[OPS_ACC3(1,1,0)] + yvel0[OPS_ACC3(0,1,1)] + yvel0[OPS_ACC3(1,1,1)] + yvel0[OPS_ACC3(0,1,0)] + yvel0[OPS_ACC3(1,1,0)] + yvel0[OPS_ACC3(0,1,1)] + yvel0[OPS_ACC3(1,1,1)] ) ) * 0.125 * dt * 0.5; back_flux = ( zarea[OPS_ACC12(0,0,0)] * ( zvel0[OPS_ACC13(0,0,0)] + zvel0[OPS_ACC13(1,0,0)] + zvel0[OPS_ACC13(0,1,0)] + zvel0[OPS_ACC13(1,1,0)] + zvel0[OPS_ACC13(0,0,0)] + zvel0[OPS_ACC13(1,0,0)] + zvel0[OPS_ACC13(0,1,0)] + zvel0[OPS_ACC13(1,1,0)] ) ) * 0.125* dt * 0.5; front_flux = ( zarea[OPS_ACC12(0,0,1)] * ( zvel0[OPS_ACC13(0,0,1)] + zvel0[OPS_ACC13(1,0,1)] + zvel0[OPS_ACC13(0,1,1)] + zvel0[OPS_ACC13(1,1,1)] + zvel0[OPS_ACC13(0,0,1)] + zvel0[OPS_ACC13(1,0,1)] + zvel0[OPS_ACC13(0,1,1)] + zvel0[OPS_ACC13(1,1,1)] ) ) * 0.125 * dt * 0.5; total_flux = right_flux - left_flux + top_flux - bottom_flux + front_flux - back_flux; volume_change[OPS_ACC4(0,0,0)] = (volume[OPS_ACC5(0,0,0)])/(volume[OPS_ACC5(0,0,0)] + total_flux); min_cell_volume = MIN( volume[OPS_ACC5(0,0,0)] + right_flux - left_flux + top_flux - bottom_flux + front_flux - back_flux, MIN( volume[OPS_ACC5(0,0,0)] + right_flux - left_flux + top_flux - bottom_flux , MIN( volume[OPS_ACC5(0,0,0)] + right_flux - left_flux, volume[OPS_ACC5(0,0,0)] + top_flux - bottom_flux) )); recip_volume = 1.0/volume[OPS_ACC5(0,0,0)]; energy_change = ( pressure[OPS_ACC6(0,0,0)]/density0[OPS_ACC7(0,0,0)] + viscosity[OPS_ACC9(0,0,0)]/density0[OPS_ACC7(0,0,0)] ) * total_flux * recip_volume; energy1[OPS_ACC11(0,0,0)] = energy0[OPS_ACC10(0,0,0)] - energy_change; density1[OPS_ACC8(0,0,0)] = density0[OPS_ACC7(0,0,0)] * volume_change[OPS_ACC4(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef 
OPS_ACC10 #undef OPS_ACC11 #undef OPS_ACC12 #undef OPS_ACC13 __global__ void ops_PdV_kernel_predict( const double* __restrict arg0, const double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, double* __restrict arg4, const double* __restrict arg5, const double* __restrict arg6, const double* __restrict arg7, double* __restrict arg8, const double* __restrict arg9, const double* __restrict arg10, double* __restrict arg11, const double* __restrict arg12, const double* __restrict arg13, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_PdV_kernel_predict + idx_z * 1 * xdim0_PdV_kernel_predict * ydim0_PdV_kernel_predict; arg1 += idx_x * 1 + idx_y * 1 * xdim1_PdV_kernel_predict + idx_z * 1 * xdim1_PdV_kernel_predict * ydim1_PdV_kernel_predict; arg2 += idx_x * 1 + idx_y * 1 * xdim2_PdV_kernel_predict + idx_z * 1 * xdim2_PdV_kernel_predict * ydim2_PdV_kernel_predict; arg3 += idx_x * 1 + idx_y * 1 * xdim3_PdV_kernel_predict + idx_z * 1 * xdim3_PdV_kernel_predict * ydim3_PdV_kernel_predict; arg4 += idx_x * 1 + idx_y * 1 * xdim4_PdV_kernel_predict + idx_z * 1 * xdim4_PdV_kernel_predict * ydim4_PdV_kernel_predict; arg5 += idx_x * 1 + idx_y * 1 * xdim5_PdV_kernel_predict + idx_z * 1 * xdim5_PdV_kernel_predict * ydim5_PdV_kernel_predict; arg6 += idx_x * 1 + idx_y * 1 * xdim6_PdV_kernel_predict + idx_z * 1 * xdim6_PdV_kernel_predict * ydim6_PdV_kernel_predict; arg7 += idx_x * 1 + idx_y * 1 * xdim7_PdV_kernel_predict + idx_z * 1 * xdim7_PdV_kernel_predict * ydim7_PdV_kernel_predict; arg8 += idx_x * 1 + idx_y * 1 * xdim8_PdV_kernel_predict + idx_z * 1 * xdim8_PdV_kernel_predict * ydim8_PdV_kernel_predict; arg9 += idx_x * 1 + idx_y * 1 * xdim9_PdV_kernel_predict + idx_z * 1 * xdim9_PdV_kernel_predict * ydim9_PdV_kernel_predict; arg10 += idx_x * 1 + idx_y * 1 * xdim10_PdV_kernel_predict + idx_z * 1 * xdim10_PdV_kernel_predict * ydim10_PdV_kernel_predict; arg11 += idx_x * 1 + idx_y * 1 * xdim11_PdV_kernel_predict + idx_z * 1 * xdim11_PdV_kernel_predict * ydim11_PdV_kernel_predict; arg12 += idx_x * 1 + idx_y * 1 * xdim12_PdV_kernel_predict + idx_z * 1 * xdim12_PdV_kernel_predict * ydim12_PdV_kernel_predict; arg13 += idx_x * 1 + idx_y * 1 * xdim13_PdV_kernel_predict + idx_z * 1 * xdim13_PdV_kernel_predict * ydim13_PdV_kernel_predict; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { PdV_kernel_predict(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13); } } // host stub function void ops_par_loop_PdV_kernel_predict(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13) { ops_arg args[14] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13}; ops_timing_realloc(5,"PdV_kernel_predict"); OPS_kernels[5].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = 
range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]*args[2].dat->dim; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]*args[3].dat->dim; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]*args[4].dat->dim; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]*args[5].dat->dim; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]*args[6].dat->dim; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]*args[7].dat->dim; int ydim7 = args[7].dat->size[1]; int xdim8 = args[8].dat->size[0]*args[8].dat->dim; int ydim8 = args[8].dat->size[1]; int xdim9 = args[9].dat->size[0]*args[9].dat->dim; int ydim9 = args[9].dat->size[1]; int xdim10 = args[10].dat->size[0]*args[10].dat->dim; int ydim10 = args[10].dat->size[1]; int xdim11 = args[11].dat->size[0]*args[11].dat->dim; int ydim11 = args[11].dat->size[1]; int xdim12 = args[12].dat->size[0]*args[12].dat->dim; int ydim12 = args[12].dat->size[1]; int xdim13 = args[13].dat->size[0]*args[13].dat->dim; int ydim13 = args[13].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_PdV_kernel_predict_h || ydim0 != ydim0_PdV_kernel_predict_h || xdim1 != xdim1_PdV_kernel_predict_h || ydim1 != ydim1_PdV_kernel_predict_h || xdim2 != xdim2_PdV_kernel_predict_h || ydim2 != ydim2_PdV_kernel_predict_h || xdim3 != xdim3_PdV_kernel_predict_h || ydim3 != ydim3_PdV_kernel_predict_h || xdim4 != xdim4_PdV_kernel_predict_h || ydim4 != ydim4_PdV_kernel_predict_h || xdim5 != xdim5_PdV_kernel_predict_h || ydim5 != ydim5_PdV_kernel_predict_h || xdim6 != xdim6_PdV_kernel_predict_h || ydim6 != ydim6_PdV_kernel_predict_h || xdim7 != xdim7_PdV_kernel_predict_h || ydim7 != ydim7_PdV_kernel_predict_h || xdim8 != xdim8_PdV_kernel_predict_h || ydim8 != ydim8_PdV_kernel_predict_h || xdim9 != xdim9_PdV_kernel_predict_h || ydim9 != ydim9_PdV_kernel_predict_h || xdim10 != xdim10_PdV_kernel_predict_h || ydim10 != ydim10_PdV_kernel_predict_h || xdim11 != xdim11_PdV_kernel_predict_h || ydim11 != ydim11_PdV_kernel_predict_h || xdim12 != xdim12_PdV_kernel_predict_h || ydim12 != ydim12_PdV_kernel_predict_h || xdim13 != xdim13_PdV_kernel_predict_h || ydim13 != ydim13_PdV_kernel_predict_h) { hipMemcpyToSymbol( xdim0_PdV_kernel_predict, &xdim0, sizeof(int) ); xdim0_PdV_kernel_predict_h = xdim0; hipMemcpyToSymbol( ydim0_PdV_kernel_predict, &ydim0, sizeof(int) ); ydim0_PdV_kernel_predict_h = ydim0; hipMemcpyToSymbol( xdim1_PdV_kernel_predict, &xdim1, sizeof(int) ); xdim1_PdV_kernel_predict_h = xdim1; hipMemcpyToSymbol( ydim1_PdV_kernel_predict, &ydim1, sizeof(int) ); ydim1_PdV_kernel_predict_h = ydim1; hipMemcpyToSymbol( xdim2_PdV_kernel_predict, &xdim2, sizeof(int) ); xdim2_PdV_kernel_predict_h = xdim2; hipMemcpyToSymbol( ydim2_PdV_kernel_predict, &ydim2, sizeof(int) ); ydim2_PdV_kernel_predict_h = ydim2; hipMemcpyToSymbol( xdim3_PdV_kernel_predict, &xdim3, 
sizeof(int) ); xdim3_PdV_kernel_predict_h = xdim3; hipMemcpyToSymbol( ydim3_PdV_kernel_predict, &ydim3, sizeof(int) ); ydim3_PdV_kernel_predict_h = ydim3; hipMemcpyToSymbol( xdim4_PdV_kernel_predict, &xdim4, sizeof(int) ); xdim4_PdV_kernel_predict_h = xdim4; hipMemcpyToSymbol( ydim4_PdV_kernel_predict, &ydim4, sizeof(int) ); ydim4_PdV_kernel_predict_h = ydim4; hipMemcpyToSymbol( xdim5_PdV_kernel_predict, &xdim5, sizeof(int) ); xdim5_PdV_kernel_predict_h = xdim5; hipMemcpyToSymbol( ydim5_PdV_kernel_predict, &ydim5, sizeof(int) ); ydim5_PdV_kernel_predict_h = ydim5; hipMemcpyToSymbol( xdim6_PdV_kernel_predict, &xdim6, sizeof(int) ); xdim6_PdV_kernel_predict_h = xdim6; hipMemcpyToSymbol( ydim6_PdV_kernel_predict, &ydim6, sizeof(int) ); ydim6_PdV_kernel_predict_h = ydim6; hipMemcpyToSymbol( xdim7_PdV_kernel_predict, &xdim7, sizeof(int) ); xdim7_PdV_kernel_predict_h = xdim7; hipMemcpyToSymbol( ydim7_PdV_kernel_predict, &ydim7, sizeof(int) ); ydim7_PdV_kernel_predict_h = ydim7; hipMemcpyToSymbol( xdim8_PdV_kernel_predict, &xdim8, sizeof(int) ); xdim8_PdV_kernel_predict_h = xdim8; hipMemcpyToSymbol( ydim8_PdV_kernel_predict, &ydim8, sizeof(int) ); ydim8_PdV_kernel_predict_h = ydim8; hipMemcpyToSymbol( xdim9_PdV_kernel_predict, &xdim9, sizeof(int) ); xdim9_PdV_kernel_predict_h = xdim9; hipMemcpyToSymbol( ydim9_PdV_kernel_predict, &ydim9, sizeof(int) ); ydim9_PdV_kernel_predict_h = ydim9; hipMemcpyToSymbol( xdim10_PdV_kernel_predict, &xdim10, sizeof(int) ); xdim10_PdV_kernel_predict_h = xdim10; hipMemcpyToSymbol( ydim10_PdV_kernel_predict, &ydim10, sizeof(int) ); ydim10_PdV_kernel_predict_h = ydim10; hipMemcpyToSymbol( xdim11_PdV_kernel_predict, &xdim11, sizeof(int) ); xdim11_PdV_kernel_predict_h = xdim11; hipMemcpyToSymbol( ydim11_PdV_kernel_predict, &ydim11, sizeof(int) ); ydim11_PdV_kernel_predict_h = ydim11; hipMemcpyToSymbol( xdim12_PdV_kernel_predict, &xdim12, sizeof(int) ); xdim12_PdV_kernel_predict_h = xdim12; hipMemcpyToSymbol( ydim12_PdV_kernel_predict, &ydim12, sizeof(int) ); ydim12_PdV_kernel_predict_h = ydim12; hipMemcpyToSymbol( xdim13_PdV_kernel_predict, &xdim13, sizeof(int) ); xdim13_PdV_kernel_predict_h = xdim13; hipMemcpyToSymbol( ydim13_PdV_kernel_predict, &ydim13, sizeof(int) ); ydim13_PdV_kernel_predict_h = ydim13; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; int dat7 = args[7].dat->elem_size; int dat8 = args[8].dat->elem_size; int dat9 = args[9].dat->elem_size; int dat10 = args[10].dat->elem_size; int dat11 = args[11].dat->elem_size; int dat12 = args[12].dat->elem_size; int dat13 = args[13].dat->elem_size; char *p_a[14]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char 
*)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif //OPS_MPI int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif //OPS_MPI int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif //OPS_MPI int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif //OPS_MPI int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif //OPS_MPI int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6+ dat6 * 
args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d]; #endif //OPS_MPI int base7 = dat7 * 1 * (start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]); base7 = base7+ dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]); base7 = base7+ dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]); p_a[7] = (char *)args[7].data_d + base7; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d] + OPS_sub_dat_list[args[8].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d]; #endif //OPS_MPI int base8 = dat8 * 1 * (start[0] * args[8].stencil->stride[0] - args[8].dat->base[0] - d_m[0]); base8 = base8+ dat8 * args[8].dat->size[0] * (start[1] * args[8].stencil->stride[1] - args[8].dat->base[1] - d_m[1]); base8 = base8+ dat8 * args[8].dat->size[0] * args[8].dat->size[1] * (start[2] * args[8].stencil->stride[2] - args[8].dat->base[2] - d_m[2]); p_a[8] = (char *)args[8].data_d + base8; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d] + OPS_sub_dat_list[args[9].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d]; #endif //OPS_MPI int base9 = dat9 * 1 * (start[0] * args[9].stencil->stride[0] - args[9].dat->base[0] - d_m[0]); base9 = base9+ dat9 * args[9].dat->size[0] * (start[1] * args[9].stencil->stride[1] - args[9].dat->base[1] - d_m[1]); base9 = base9+ dat9 * args[9].dat->size[0] * args[9].dat->size[1] * (start[2] * args[9].stencil->stride[2] - args[9].dat->base[2] - d_m[2]); p_a[9] = (char *)args[9].data_d + base9; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d] + OPS_sub_dat_list[args[10].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d]; #endif //OPS_MPI int base10 = dat10 * 1 * (start[0] * args[10].stencil->stride[0] - args[10].dat->base[0] - d_m[0]); base10 = base10+ dat10 * args[10].dat->size[0] * (start[1] * args[10].stencil->stride[1] - args[10].dat->base[1] - d_m[1]); base10 = base10+ dat10 * args[10].dat->size[0] * args[10].dat->size[1] * (start[2] * args[10].stencil->stride[2] - args[10].dat->base[2] - d_m[2]); p_a[10] = (char *)args[10].data_d + base10; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[11].dat->d_m[d] + OPS_sub_dat_list[args[11].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[11].dat->d_m[d]; #endif //OPS_MPI int base11 = dat11 * 1 * (start[0] * args[11].stencil->stride[0] - args[11].dat->base[0] - d_m[0]); base11 = base11+ dat11 * args[11].dat->size[0] * (start[1] * args[11].stencil->stride[1] - args[11].dat->base[1] - d_m[1]); base11 = base11+ dat11 * args[11].dat->size[0] * args[11].dat->size[1] * (start[2] * args[11].stencil->stride[2] - args[11].dat->base[2] - d_m[2]); p_a[11] = (char *)args[11].data_d + base11; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d] + OPS_sub_dat_list[args[12].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d]; #endif //OPS_MPI int base12 = dat12 * 1 * (start[0] * args[12].stencil->stride[0] - 
args[12].dat->base[0] - d_m[0]); base12 = base12+ dat12 * args[12].dat->size[0] * (start[1] * args[12].stencil->stride[1] - args[12].dat->base[1] - d_m[1]); base12 = base12+ dat12 * args[12].dat->size[0] * args[12].dat->size[1] * (start[2] * args[12].stencil->stride[2] - args[12].dat->base[2] - d_m[2]); p_a[12] = (char *)args[12].data_d + base12; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d] + OPS_sub_dat_list[args[13].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d]; #endif //OPS_MPI int base13 = dat13 * 1 * (start[0] * args[13].stencil->stride[0] - args[13].dat->base[0] - d_m[0]); base13 = base13+ dat13 * args[13].dat->size[0] * (start[1] * args[13].stencil->stride[1] - args[13].dat->base[1] - d_m[1]); base13 = base13+ dat13 * args[13].dat->size[0] * args[13].dat->size[1] * (start[2] * args[13].stencil->stride[2] - args[13].dat->base[2] - d_m[2]); p_a[13] = (char *)args[13].data_d + base13; ops_H_D_exchanges_device(args, 14); ops_halo_exchanges(args,14,range); ops_timers_core(&c1,&t1); OPS_kernels[5].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_PdV_kernel_predict), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], (double *)p_a[8], (double *)p_a[9], (double *)p_a[10], (double *)p_a[11], (double *)p_a[12], (double *)p_a[13],x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[5].time += t2-t1; ops_set_dirtybit_device(args, 14); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[8],range); ops_set_halo_dirtybit3(&args[11],range); //Update kernel record OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg1); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg2); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg3); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg4); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg5); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg6); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg7); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg8); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg9); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg10); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg11); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg12); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg13); }
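/*
 * Illustrative sketch, separate from the generated OPS file above: the OPS_ACCn(x,y,z)
 * macros flatten a 3D stencil offset into x + xdim*(y) + xdim*ydim*(z), and the wrapper
 * kernel has already advanced each argN pointer to the current grid point, so the user
 * function only ever uses small relative offsets. XDIM, YDIM and the field below are
 * made-up stand-ins for the per-argument xdimN_/ydimN_ constants; this is not OPS code.
 */
#include <cstdio>

#define XDIM 8                       /* padded x extent of the hypothetical field */
#define YDIM 6                       /* padded y extent */
#define ACC(x, y, z) ((x) + XDIM * (y) + XDIM * YDIM * (z))

int main() {
  static double field[XDIM * YDIM * 4] = {0.0};
  /* the wrapper shifts the base pointer to grid point (2,3,1)... */
  double *cell = field + ACC(2, 3, 1);
  /* ...so the user function addresses neighbours with relative offsets,
     just like xvel0[OPS_ACC1(0,1,0)] or zarea[OPS_ACC12(0,0,1)] above */
  cell[ACC(0, 0, 0)] = 1.0;          /* centre cell */
  cell[ACC(1, 0, 0)] = 2.0;          /* +1 in x     */
  cell[ACC(0, 1, 0)] = 3.0;          /* +1 in y     */
  cell[ACC(0, 0, 1)] = 4.0;          /* +1 in z     */
  printf("centre=%g, x+1=%g, y+1=%g, z+1=%g\n",
         field[ACC(2, 3, 1)], field[ACC(3, 3, 1)],
         field[ACC(2, 4, 1)], field[ACC(2, 3, 2)]);
  return 0;
}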
94a2be252ed76e1d6e8deec7a813802407353674.cu
// // auto-generated by ops.py // __constant__ int xdim0_PdV_kernel_predict; int xdim0_PdV_kernel_predict_h = -1; __constant__ int ydim0_PdV_kernel_predict; int ydim0_PdV_kernel_predict_h = -1; __constant__ int xdim1_PdV_kernel_predict; int xdim1_PdV_kernel_predict_h = -1; __constant__ int ydim1_PdV_kernel_predict; int ydim1_PdV_kernel_predict_h = -1; __constant__ int xdim2_PdV_kernel_predict; int xdim2_PdV_kernel_predict_h = -1; __constant__ int ydim2_PdV_kernel_predict; int ydim2_PdV_kernel_predict_h = -1; __constant__ int xdim3_PdV_kernel_predict; int xdim3_PdV_kernel_predict_h = -1; __constant__ int ydim3_PdV_kernel_predict; int ydim3_PdV_kernel_predict_h = -1; __constant__ int xdim4_PdV_kernel_predict; int xdim4_PdV_kernel_predict_h = -1; __constant__ int ydim4_PdV_kernel_predict; int ydim4_PdV_kernel_predict_h = -1; __constant__ int xdim5_PdV_kernel_predict; int xdim5_PdV_kernel_predict_h = -1; __constant__ int ydim5_PdV_kernel_predict; int ydim5_PdV_kernel_predict_h = -1; __constant__ int xdim6_PdV_kernel_predict; int xdim6_PdV_kernel_predict_h = -1; __constant__ int ydim6_PdV_kernel_predict; int ydim6_PdV_kernel_predict_h = -1; __constant__ int xdim7_PdV_kernel_predict; int xdim7_PdV_kernel_predict_h = -1; __constant__ int ydim7_PdV_kernel_predict; int ydim7_PdV_kernel_predict_h = -1; __constant__ int xdim8_PdV_kernel_predict; int xdim8_PdV_kernel_predict_h = -1; __constant__ int ydim8_PdV_kernel_predict; int ydim8_PdV_kernel_predict_h = -1; __constant__ int xdim9_PdV_kernel_predict; int xdim9_PdV_kernel_predict_h = -1; __constant__ int ydim9_PdV_kernel_predict; int ydim9_PdV_kernel_predict_h = -1; __constant__ int xdim10_PdV_kernel_predict; int xdim10_PdV_kernel_predict_h = -1; __constant__ int ydim10_PdV_kernel_predict; int ydim10_PdV_kernel_predict_h = -1; __constant__ int xdim11_PdV_kernel_predict; int xdim11_PdV_kernel_predict_h = -1; __constant__ int ydim11_PdV_kernel_predict; int ydim11_PdV_kernel_predict_h = -1; __constant__ int xdim12_PdV_kernel_predict; int xdim12_PdV_kernel_predict_h = -1; __constant__ int ydim12_PdV_kernel_predict; int ydim12_PdV_kernel_predict_h = -1; __constant__ int xdim13_PdV_kernel_predict; int xdim13_PdV_kernel_predict_h = -1; __constant__ int ydim13_PdV_kernel_predict; int ydim13_PdV_kernel_predict_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_PdV_kernel_predict*(y)+xdim0_PdV_kernel_predict*ydim0_PdV_kernel_predict*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_PdV_kernel_predict*(y)+xdim1_PdV_kernel_predict*ydim1_PdV_kernel_predict*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_PdV_kernel_predict*(y)+xdim2_PdV_kernel_predict*ydim2_PdV_kernel_predict*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_PdV_kernel_predict*(y)+xdim3_PdV_kernel_predict*ydim3_PdV_kernel_predict*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_PdV_kernel_predict*(y)+xdim4_PdV_kernel_predict*ydim4_PdV_kernel_predict*(z)) #define OPS_ACC5(x,y,z) (x+xdim5_PdV_kernel_predict*(y)+xdim5_PdV_kernel_predict*ydim5_PdV_kernel_predict*(z)) #define OPS_ACC6(x,y,z) (x+xdim6_PdV_kernel_predict*(y)+xdim6_PdV_kernel_predict*ydim6_PdV_kernel_predict*(z)) #define OPS_ACC7(x,y,z) (x+xdim7_PdV_kernel_predict*(y)+xdim7_PdV_kernel_predict*ydim7_PdV_kernel_predict*(z)) #define OPS_ACC8(x,y,z) (x+xdim8_PdV_kernel_predict*(y)+xdim8_PdV_kernel_predict*ydim8_PdV_kernel_predict*(z)) #define OPS_ACC9(x,y,z) (x+xdim9_PdV_kernel_predict*(y)+xdim9_PdV_kernel_predict*ydim9_PdV_kernel_predict*(z)) #define OPS_ACC10(x,y,z) (x+xdim10_PdV_kernel_predict*(y)+xdim10_PdV_kernel_predict*ydim10_PdV_kernel_predict*(z)) #define OPS_ACC11(x,y,z) 
(x+xdim11_PdV_kernel_predict*(y)+xdim11_PdV_kernel_predict*ydim11_PdV_kernel_predict*(z)) #define OPS_ACC12(x,y,z) (x+xdim12_PdV_kernel_predict*(y)+xdim12_PdV_kernel_predict*ydim12_PdV_kernel_predict*(z)) #define OPS_ACC13(x,y,z) (x+xdim13_PdV_kernel_predict*(y)+xdim13_PdV_kernel_predict*ydim13_PdV_kernel_predict*(z)) //user function __device__ void PdV_kernel_predict(const double *xarea, const double *xvel0, const double *yarea, const double *yvel0, double *volume_change, const double *volume, const double *pressure, const double *density0, double *density1, const double *viscosity, const double *energy0, double *energy1, const double *zarea, const double *zvel0) { double recip_volume, energy_change, min_cell_volume; double right_flux, left_flux, top_flux, bottom_flux, back_flux, front_flux, total_flux; left_flux = ( xarea[OPS_ACC0(0,0,0)] * ( xvel0[OPS_ACC1(0,0,0)] + xvel0[OPS_ACC1(0,1,0)] + xvel0[OPS_ACC1(0,0,1)] + xvel0[OPS_ACC1(0,1,1)] + xvel0[OPS_ACC1(0,0,0)] + xvel0[OPS_ACC1(0,1,0)] + xvel0[OPS_ACC1(0,0,1)] + xvel0[OPS_ACC1(0,1,1)] ) ) * 0.125 * dt * 0.5; right_flux = ( xarea[OPS_ACC0(1,0,0)] * ( xvel0[OPS_ACC1(1,0,0)] + xvel0[OPS_ACC1(1,1,0)] + xvel0[OPS_ACC1(1,0,1)] + xvel0[OPS_ACC1(1,1,1)] + xvel0[OPS_ACC1(1,0,0)] + xvel0[OPS_ACC1(1,1,0)] + xvel0[OPS_ACC1(1,0,1)] + xvel0[OPS_ACC1(1,1,1)] ) ) * 0.125 * dt * 0.5; bottom_flux = ( yarea[OPS_ACC2(0,0,0)] * ( yvel0[OPS_ACC3(0,0,0)] + yvel0[OPS_ACC3(1,0,0)] + yvel0[OPS_ACC3(0,0,1)] + yvel0[OPS_ACC3(1,0,1)] + yvel0[OPS_ACC3(0,0,0)] + yvel0[OPS_ACC3(1,0,0)] + yvel0[OPS_ACC3(0,0,1)] + yvel0[OPS_ACC3(1,0,1)] ) ) * 0.125* dt * 0.5; top_flux = ( yarea[OPS_ACC2(0,1,0)] * ( yvel0[OPS_ACC3(0,1,0)] + yvel0[OPS_ACC3(1,1,0)] + yvel0[OPS_ACC3(0,1,1)] + yvel0[OPS_ACC3(1,1,1)] + yvel0[OPS_ACC3(0,1,0)] + yvel0[OPS_ACC3(1,1,0)] + yvel0[OPS_ACC3(0,1,1)] + yvel0[OPS_ACC3(1,1,1)] ) ) * 0.125 * dt * 0.5; back_flux = ( zarea[OPS_ACC12(0,0,0)] * ( zvel0[OPS_ACC13(0,0,0)] + zvel0[OPS_ACC13(1,0,0)] + zvel0[OPS_ACC13(0,1,0)] + zvel0[OPS_ACC13(1,1,0)] + zvel0[OPS_ACC13(0,0,0)] + zvel0[OPS_ACC13(1,0,0)] + zvel0[OPS_ACC13(0,1,0)] + zvel0[OPS_ACC13(1,1,0)] ) ) * 0.125* dt * 0.5; front_flux = ( zarea[OPS_ACC12(0,0,1)] * ( zvel0[OPS_ACC13(0,0,1)] + zvel0[OPS_ACC13(1,0,1)] + zvel0[OPS_ACC13(0,1,1)] + zvel0[OPS_ACC13(1,1,1)] + zvel0[OPS_ACC13(0,0,1)] + zvel0[OPS_ACC13(1,0,1)] + zvel0[OPS_ACC13(0,1,1)] + zvel0[OPS_ACC13(1,1,1)] ) ) * 0.125 * dt * 0.5; total_flux = right_flux - left_flux + top_flux - bottom_flux + front_flux - back_flux; volume_change[OPS_ACC4(0,0,0)] = (volume[OPS_ACC5(0,0,0)])/(volume[OPS_ACC5(0,0,0)] + total_flux); min_cell_volume = MIN( volume[OPS_ACC5(0,0,0)] + right_flux - left_flux + top_flux - bottom_flux + front_flux - back_flux, MIN( volume[OPS_ACC5(0,0,0)] + right_flux - left_flux + top_flux - bottom_flux , MIN( volume[OPS_ACC5(0,0,0)] + right_flux - left_flux, volume[OPS_ACC5(0,0,0)] + top_flux - bottom_flux) )); recip_volume = 1.0/volume[OPS_ACC5(0,0,0)]; energy_change = ( pressure[OPS_ACC6(0,0,0)]/density0[OPS_ACC7(0,0,0)] + viscosity[OPS_ACC9(0,0,0)]/density0[OPS_ACC7(0,0,0)] ) * total_flux * recip_volume; energy1[OPS_ACC11(0,0,0)] = energy0[OPS_ACC10(0,0,0)] - energy_change; density1[OPS_ACC8(0,0,0)] = density0[OPS_ACC7(0,0,0)] * volume_change[OPS_ACC4(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 #undef OPS_ACC11 #undef OPS_ACC12 #undef OPS_ACC13 __global__ void ops_PdV_kernel_predict( const double* 
__restrict arg0, const double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, double* __restrict arg4, const double* __restrict arg5, const double* __restrict arg6, const double* __restrict arg7, double* __restrict arg8, const double* __restrict arg9, const double* __restrict arg10, double* __restrict arg11, const double* __restrict arg12, const double* __restrict arg13, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_PdV_kernel_predict + idx_z * 1 * xdim0_PdV_kernel_predict * ydim0_PdV_kernel_predict; arg1 += idx_x * 1 + idx_y * 1 * xdim1_PdV_kernel_predict + idx_z * 1 * xdim1_PdV_kernel_predict * ydim1_PdV_kernel_predict; arg2 += idx_x * 1 + idx_y * 1 * xdim2_PdV_kernel_predict + idx_z * 1 * xdim2_PdV_kernel_predict * ydim2_PdV_kernel_predict; arg3 += idx_x * 1 + idx_y * 1 * xdim3_PdV_kernel_predict + idx_z * 1 * xdim3_PdV_kernel_predict * ydim3_PdV_kernel_predict; arg4 += idx_x * 1 + idx_y * 1 * xdim4_PdV_kernel_predict + idx_z * 1 * xdim4_PdV_kernel_predict * ydim4_PdV_kernel_predict; arg5 += idx_x * 1 + idx_y * 1 * xdim5_PdV_kernel_predict + idx_z * 1 * xdim5_PdV_kernel_predict * ydim5_PdV_kernel_predict; arg6 += idx_x * 1 + idx_y * 1 * xdim6_PdV_kernel_predict + idx_z * 1 * xdim6_PdV_kernel_predict * ydim6_PdV_kernel_predict; arg7 += idx_x * 1 + idx_y * 1 * xdim7_PdV_kernel_predict + idx_z * 1 * xdim7_PdV_kernel_predict * ydim7_PdV_kernel_predict; arg8 += idx_x * 1 + idx_y * 1 * xdim8_PdV_kernel_predict + idx_z * 1 * xdim8_PdV_kernel_predict * ydim8_PdV_kernel_predict; arg9 += idx_x * 1 + idx_y * 1 * xdim9_PdV_kernel_predict + idx_z * 1 * xdim9_PdV_kernel_predict * ydim9_PdV_kernel_predict; arg10 += idx_x * 1 + idx_y * 1 * xdim10_PdV_kernel_predict + idx_z * 1 * xdim10_PdV_kernel_predict * ydim10_PdV_kernel_predict; arg11 += idx_x * 1 + idx_y * 1 * xdim11_PdV_kernel_predict + idx_z * 1 * xdim11_PdV_kernel_predict * ydim11_PdV_kernel_predict; arg12 += idx_x * 1 + idx_y * 1 * xdim12_PdV_kernel_predict + idx_z * 1 * xdim12_PdV_kernel_predict * ydim12_PdV_kernel_predict; arg13 += idx_x * 1 + idx_y * 1 * xdim13_PdV_kernel_predict + idx_z * 1 * xdim13_PdV_kernel_predict * ydim13_PdV_kernel_predict; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { PdV_kernel_predict(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13); } } // host stub function void ops_par_loop_PdV_kernel_predict(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13) { ops_arg args[14] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13}; ops_timing_realloc(5,"PdV_kernel_predict"); OPS_kernels[5].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = 
sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]*args[2].dat->dim; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]*args[3].dat->dim; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]*args[4].dat->dim; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]*args[5].dat->dim; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]*args[6].dat->dim; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]*args[7].dat->dim; int ydim7 = args[7].dat->size[1]; int xdim8 = args[8].dat->size[0]*args[8].dat->dim; int ydim8 = args[8].dat->size[1]; int xdim9 = args[9].dat->size[0]*args[9].dat->dim; int ydim9 = args[9].dat->size[1]; int xdim10 = args[10].dat->size[0]*args[10].dat->dim; int ydim10 = args[10].dat->size[1]; int xdim11 = args[11].dat->size[0]*args[11].dat->dim; int ydim11 = args[11].dat->size[1]; int xdim12 = args[12].dat->size[0]*args[12].dat->dim; int ydim12 = args[12].dat->size[1]; int xdim13 = args[13].dat->size[0]*args[13].dat->dim; int ydim13 = args[13].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_PdV_kernel_predict_h || ydim0 != ydim0_PdV_kernel_predict_h || xdim1 != xdim1_PdV_kernel_predict_h || ydim1 != ydim1_PdV_kernel_predict_h || xdim2 != xdim2_PdV_kernel_predict_h || ydim2 != ydim2_PdV_kernel_predict_h || xdim3 != xdim3_PdV_kernel_predict_h || ydim3 != ydim3_PdV_kernel_predict_h || xdim4 != xdim4_PdV_kernel_predict_h || ydim4 != ydim4_PdV_kernel_predict_h || xdim5 != xdim5_PdV_kernel_predict_h || ydim5 != ydim5_PdV_kernel_predict_h || xdim6 != xdim6_PdV_kernel_predict_h || ydim6 != ydim6_PdV_kernel_predict_h || xdim7 != xdim7_PdV_kernel_predict_h || ydim7 != ydim7_PdV_kernel_predict_h || xdim8 != xdim8_PdV_kernel_predict_h || ydim8 != ydim8_PdV_kernel_predict_h || xdim9 != xdim9_PdV_kernel_predict_h || ydim9 != ydim9_PdV_kernel_predict_h || xdim10 != xdim10_PdV_kernel_predict_h || ydim10 != ydim10_PdV_kernel_predict_h || xdim11 != xdim11_PdV_kernel_predict_h || ydim11 != ydim11_PdV_kernel_predict_h || xdim12 != xdim12_PdV_kernel_predict_h || ydim12 != ydim12_PdV_kernel_predict_h || xdim13 != xdim13_PdV_kernel_predict_h || ydim13 != ydim13_PdV_kernel_predict_h) { cudaMemcpyToSymbol( xdim0_PdV_kernel_predict, &xdim0, sizeof(int) ); xdim0_PdV_kernel_predict_h = xdim0; cudaMemcpyToSymbol( ydim0_PdV_kernel_predict, &ydim0, sizeof(int) ); ydim0_PdV_kernel_predict_h = ydim0; cudaMemcpyToSymbol( xdim1_PdV_kernel_predict, &xdim1, sizeof(int) ); xdim1_PdV_kernel_predict_h = xdim1; cudaMemcpyToSymbol( ydim1_PdV_kernel_predict, &ydim1, sizeof(int) ); ydim1_PdV_kernel_predict_h = ydim1; cudaMemcpyToSymbol( xdim2_PdV_kernel_predict, &xdim2, sizeof(int) ); xdim2_PdV_kernel_predict_h = xdim2; cudaMemcpyToSymbol( ydim2_PdV_kernel_predict, &ydim2, sizeof(int) ); ydim2_PdV_kernel_predict_h = ydim2; cudaMemcpyToSymbol( xdim3_PdV_kernel_predict, &xdim3, sizeof(int) ); xdim3_PdV_kernel_predict_h = xdim3; cudaMemcpyToSymbol( ydim3_PdV_kernel_predict, 
&ydim3, sizeof(int) ); ydim3_PdV_kernel_predict_h = ydim3; cudaMemcpyToSymbol( xdim4_PdV_kernel_predict, &xdim4, sizeof(int) ); xdim4_PdV_kernel_predict_h = xdim4; cudaMemcpyToSymbol( ydim4_PdV_kernel_predict, &ydim4, sizeof(int) ); ydim4_PdV_kernel_predict_h = ydim4; cudaMemcpyToSymbol( xdim5_PdV_kernel_predict, &xdim5, sizeof(int) ); xdim5_PdV_kernel_predict_h = xdim5; cudaMemcpyToSymbol( ydim5_PdV_kernel_predict, &ydim5, sizeof(int) ); ydim5_PdV_kernel_predict_h = ydim5; cudaMemcpyToSymbol( xdim6_PdV_kernel_predict, &xdim6, sizeof(int) ); xdim6_PdV_kernel_predict_h = xdim6; cudaMemcpyToSymbol( ydim6_PdV_kernel_predict, &ydim6, sizeof(int) ); ydim6_PdV_kernel_predict_h = ydim6; cudaMemcpyToSymbol( xdim7_PdV_kernel_predict, &xdim7, sizeof(int) ); xdim7_PdV_kernel_predict_h = xdim7; cudaMemcpyToSymbol( ydim7_PdV_kernel_predict, &ydim7, sizeof(int) ); ydim7_PdV_kernel_predict_h = ydim7; cudaMemcpyToSymbol( xdim8_PdV_kernel_predict, &xdim8, sizeof(int) ); xdim8_PdV_kernel_predict_h = xdim8; cudaMemcpyToSymbol( ydim8_PdV_kernel_predict, &ydim8, sizeof(int) ); ydim8_PdV_kernel_predict_h = ydim8; cudaMemcpyToSymbol( xdim9_PdV_kernel_predict, &xdim9, sizeof(int) ); xdim9_PdV_kernel_predict_h = xdim9; cudaMemcpyToSymbol( ydim9_PdV_kernel_predict, &ydim9, sizeof(int) ); ydim9_PdV_kernel_predict_h = ydim9; cudaMemcpyToSymbol( xdim10_PdV_kernel_predict, &xdim10, sizeof(int) ); xdim10_PdV_kernel_predict_h = xdim10; cudaMemcpyToSymbol( ydim10_PdV_kernel_predict, &ydim10, sizeof(int) ); ydim10_PdV_kernel_predict_h = ydim10; cudaMemcpyToSymbol( xdim11_PdV_kernel_predict, &xdim11, sizeof(int) ); xdim11_PdV_kernel_predict_h = xdim11; cudaMemcpyToSymbol( ydim11_PdV_kernel_predict, &ydim11, sizeof(int) ); ydim11_PdV_kernel_predict_h = ydim11; cudaMemcpyToSymbol( xdim12_PdV_kernel_predict, &xdim12, sizeof(int) ); xdim12_PdV_kernel_predict_h = xdim12; cudaMemcpyToSymbol( ydim12_PdV_kernel_predict, &ydim12, sizeof(int) ); ydim12_PdV_kernel_predict_h = ydim12; cudaMemcpyToSymbol( xdim13_PdV_kernel_predict, &xdim13, sizeof(int) ); xdim13_PdV_kernel_predict_h = xdim13; cudaMemcpyToSymbol( ydim13_PdV_kernel_predict, &ydim13, sizeof(int) ); ydim13_PdV_kernel_predict_h = ydim13; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; int dat7 = args[7].dat->elem_size; int dat8 = args[8].dat->elem_size; int dat9 = args[9].dat->elem_size; int dat10 = args[10].dat->elem_size; int dat11 = args[11].dat->elem_size; int dat12 = args[12].dat->elem_size; int dat13 = args[13].dat->elem_size; char *p_a[14]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = 
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif //OPS_MPI int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif //OPS_MPI int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif //OPS_MPI int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif //OPS_MPI int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif //OPS_MPI int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - 
args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d]; #endif //OPS_MPI int base7 = dat7 * 1 * (start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]); base7 = base7+ dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]); base7 = base7+ dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]); p_a[7] = (char *)args[7].data_d + base7; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d] + OPS_sub_dat_list[args[8].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d]; #endif //OPS_MPI int base8 = dat8 * 1 * (start[0] * args[8].stencil->stride[0] - args[8].dat->base[0] - d_m[0]); base8 = base8+ dat8 * args[8].dat->size[0] * (start[1] * args[8].stencil->stride[1] - args[8].dat->base[1] - d_m[1]); base8 = base8+ dat8 * args[8].dat->size[0] * args[8].dat->size[1] * (start[2] * args[8].stencil->stride[2] - args[8].dat->base[2] - d_m[2]); p_a[8] = (char *)args[8].data_d + base8; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d] + OPS_sub_dat_list[args[9].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d]; #endif //OPS_MPI int base9 = dat9 * 1 * (start[0] * args[9].stencil->stride[0] - args[9].dat->base[0] - d_m[0]); base9 = base9+ dat9 * args[9].dat->size[0] * (start[1] * args[9].stencil->stride[1] - args[9].dat->base[1] - d_m[1]); base9 = base9+ dat9 * args[9].dat->size[0] * args[9].dat->size[1] * (start[2] * args[9].stencil->stride[2] - args[9].dat->base[2] - d_m[2]); p_a[9] = (char *)args[9].data_d + base9; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d] + OPS_sub_dat_list[args[10].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d]; #endif //OPS_MPI int base10 = dat10 * 1 * (start[0] * args[10].stencil->stride[0] - args[10].dat->base[0] - d_m[0]); base10 = base10+ dat10 * args[10].dat->size[0] * (start[1] * args[10].stencil->stride[1] - args[10].dat->base[1] - d_m[1]); base10 = base10+ dat10 * args[10].dat->size[0] * args[10].dat->size[1] * (start[2] * args[10].stencil->stride[2] - args[10].dat->base[2] - d_m[2]); p_a[10] = (char *)args[10].data_d + base10; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[11].dat->d_m[d] + OPS_sub_dat_list[args[11].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[11].dat->d_m[d]; #endif //OPS_MPI int base11 = dat11 * 1 * (start[0] * args[11].stencil->stride[0] - args[11].dat->base[0] - d_m[0]); base11 = base11+ dat11 * args[11].dat->size[0] * (start[1] * args[11].stencil->stride[1] - args[11].dat->base[1] - d_m[1]); base11 = base11+ dat11 * args[11].dat->size[0] * args[11].dat->size[1] * (start[2] * args[11].stencil->stride[2] - args[11].dat->base[2] - d_m[2]); p_a[11] = (char *)args[11].data_d + base11; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d] + OPS_sub_dat_list[args[12].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d]; #endif //OPS_MPI int base12 = dat12 * 1 * (start[0] * args[12].stencil->stride[0] - args[12].dat->base[0] - d_m[0]); base12 = base12+ dat12 * args[12].dat->size[0] * 
(start[1] * args[12].stencil->stride[1] - args[12].dat->base[1] - d_m[1]); base12 = base12+ dat12 * args[12].dat->size[0] * args[12].dat->size[1] * (start[2] * args[12].stencil->stride[2] - args[12].dat->base[2] - d_m[2]); p_a[12] = (char *)args[12].data_d + base12; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d] + OPS_sub_dat_list[args[13].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d]; #endif //OPS_MPI int base13 = dat13 * 1 * (start[0] * args[13].stencil->stride[0] - args[13].dat->base[0] - d_m[0]); base13 = base13+ dat13 * args[13].dat->size[0] * (start[1] * args[13].stencil->stride[1] - args[13].dat->base[1] - d_m[1]); base13 = base13+ dat13 * args[13].dat->size[0] * args[13].dat->size[1] * (start[2] * args[13].stencil->stride[2] - args[13].dat->base[2] - d_m[2]); p_a[13] = (char *)args[13].data_d + base13; ops_H_D_exchanges_device(args, 14); ops_halo_exchanges(args,14,range); ops_timers_core(&c1,&t1); OPS_kernels[5].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_PdV_kernel_predict<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], (double *)p_a[8], (double *)p_a[9], (double *)p_a[10], (double *)p_a[11], (double *)p_a[12], (double *)p_a[13],x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[5].time += t2-t1; ops_set_dirtybit_device(args, 14); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[8],range); ops_set_halo_dirtybit3(&args[11],range); //Update kernel record OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg1); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg2); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg3); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg4); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg5); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg6); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg7); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg8); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg9); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg10); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg11); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg12); OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg13); }
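/*
 * Minimal sketch of the launch-syntax correspondence, with a hypothetical kernel `scale`
 * (not an OPS symbol): the CUDA host stub above launches with the triple-chevron form
 * ops_PdV_kernel_predict<<<grid, tblock>>>(...), while the hipified copy of the same stub
 * earlier in this dump spells the identical launch as hipLaunchKernelGGL(...).
 */
__global__ void scale(double *x, double a, int n) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

void launch_scale(double *d_x, double a, int n, int block_size) {
  dim3 grid((n - 1) / block_size + 1, 1, 1);   /* same rounding as the host stub's grid */
  dim3 tblock(block_size, 1, 1);
  /* CUDA spelling: */
  scale<<<grid, tblock>>>(d_x, a, n);
  /* hipified spelling of the same launch (0 bytes of dynamic shared memory, default
   * stream are the two extra arguments):
   *   hipLaunchKernelGGL(scale, dim3(grid), dim3(tblock), 0, 0, d_x, a, n);
   */
}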
2a9a2b29b571a8450d8fd5ad8c68364f93652970.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2014-2015 NVIDIA Corporation. All rights reserved. * * Sample CUPTI app to demonstrate the usage of unified memory counter profiling * */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <cupti.h> #define CUPTI_CALL(call) \ do { \ CUptiResult _status = call; \ if (_status != CUPTI_SUCCESS) { \ const char *errstr; \ cuptiGetResultString(_status, &errstr); \ fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \ __FILE__, __LINE__, #call, errstr); \ exit(-1); \ } \ } while (0) #define DRIVER_API_CALL(apiFuncCall) \ do { \ hipError_t _status = apiFuncCall; \ if (_status != hipSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \ __FILE__, __LINE__, #apiFuncCall, _status); \ exit(-1); \ } \ } while (0) #define RUNTIME_API_CALL(apiFuncCall) \ do { \ hipError_t _status = apiFuncCall; \ if (_status != hipSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \ __FILE__, __LINE__, #apiFuncCall, hipGetErrorString(_status));\ exit(-1); \ } \ } while (0) #define BUF_SIZE (8 * 1024) #define ALIGN_SIZE (8) #define ALIGN_BUFFER(buffer, align) \ (((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer)) static const char * getUvmCounterKindString(CUpti_ActivityUnifiedMemoryCounterKind kind) { switch (kind) { case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD: return "BYTES_TRANSFER_HTOD"; case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH: return "BYTES_TRANSFER_DTOH"; default: break; } return "<unknown>"; } static void printActivity(CUpti_Activity *record) { switch (record->kind) { case CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER: { CUpti_ActivityUnifiedMemoryCounter2 *uvm = (CUpti_ActivityUnifiedMemoryCounter2 *)record; printf("UNIFIED_MEMORY_COUNTER [ %llu %llu ] kind=%s value=%llu src %u dst %u\n", (unsigned long long)(uvm->start), (unsigned long long)(uvm->end), getUvmCounterKindString(uvm->counterKind), (unsigned long long)uvm->value, uvm->srcId, uvm->dstId); break; } default: printf(" <unknown>\n"); break; } } static void CUPTIAPI bufferRequested(uint8_t **buffer, size_t *size, size_t *maxNumRecords) { uint8_t *rawBuffer; *size = BUF_SIZE; rawBuffer = (uint8_t *)malloc(*size + ALIGN_SIZE); *buffer = ALIGN_BUFFER(rawBuffer, ALIGN_SIZE); *maxNumRecords = 0; if (*buffer == NULL) { printf("Error: out of memory\n"); exit(-1); } } static void CUPTIAPI bufferCompleted(hipCtx_t ctx, uint32_t streamId, uint8_t *buffer, size_t size, size_t validSize) { CUptiResult status; CUpti_Activity *record = NULL; do { status = cuptiActivityGetNextRecord(buffer, validSize, &record); if (status == CUPTI_SUCCESS) { printActivity(record); } else if (status == CUPTI_ERROR_MAX_LIMIT_REACHED) { break; } else { CUPTI_CALL(status); } } while (1); // report any records dropped from the queue size_t dropped; CUPTI_CALL(cuptiActivityGetNumDroppedRecords(ctx, streamId, &dropped)); if (dropped != 0) { printf("Dropped %u activity records\n", (unsigned int)dropped); } free(buffer); } template<class T> __host__ __device__ void checkData(const char *loc, T *data, int size, int expectedVal) { int i; for (i = 0; i < size / (int)sizeof(T); i++) { if (data[i] != expectedVal) { printf("Mismatch found on %s\n", loc); printf("Address 0x%p, Observed = 0x%x Expected = 0x%x\n", data+i, data[i], expectedVal); break; } } } template<class T> __host__ __device__ void writeData(T *data, int 
size, int writeVal) { int i; for (i = 0; i < size / (int)sizeof(T); i++) { data[i] = writeVal; } } __global__ void testKernel(int *data, int size, int expectedVal) { checkData("GPU", data, size, expectedVal); writeData(data, size, -expectedVal); } int main(int argc, char **argv) { CUptiResult res; int deviceCount; int *data = NULL; int size = 64*1024; // 64 KB int i = 123; CUpti_ActivityUnifiedMemoryCounterConfig config[2]; DRIVER_API_CALL(hipInit(0)); DRIVER_API_CALL(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) { printf("There is no device supporting CUDA.\n"); exit(-1); } // register cupti activity buffer callbacks CUPTI_CALL(cuptiActivityRegisterCallbacks(bufferRequested, bufferCompleted)); // configure unified memory counters config[0].scope = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_PROCESS_SINGLE_DEVICE; config[0].kind = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD; config[0].deviceId = 0; config[0].enable = 1; config[1].scope = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_PROCESS_SINGLE_DEVICE; config[1].kind = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH; config[1].deviceId = 0; config[1].enable = 1; res = cuptiActivityConfigureUnifiedMemoryCounter(config, 2); if (res == CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED) { printf("Test is waived, unified memory is not supported on the underlying platform.\n"); return 0; } else if (res == CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_DEVICE) { printf("Test is waived, unified memory is not supported on the device.\n"); return 0; } else if (res == CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_NON_P2P_DEVICES) { printf("Test is waived, unified memory is not supported on the non-P2P multi-gpu setup.\n"); return 0; } else { CUPTI_CALL(res); } // enable unified memory counter activity CUPTI_CALL(cuptiActivityEnable(CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER)); // allocate unified memory printf("Allocation size in bytes %d\n", size); RUNTIME_API_CALL(hipMallocManaged(&data, size)); // CPU access writeData(data, size, i); // kernel launch hipLaunchKernelGGL(( testKernel), dim3(1),dim3(1), 0, 0, data, size, i); RUNTIME_API_CALL(hipDeviceSynchronize()); // CPU access checkData("CPU", data, size, -i); // free unified memory RUNTIME_API_CALL(hipFree(data)); CUPTI_CALL(cuptiActivityFlushAll(0)); // disable unified memory counter activity CUPTI_CALL(cuptiActivityDisable(CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER)); hipDeviceReset(); return 0; }
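/*
 * Illustrative sketch of the pointer arithmetic behind the sample's ALIGN_BUFFER macro:
 * round a buffer pointer up to the next multiple of a power-of-two alignment. In practice
 * malloc already returns storage aligned to at least ALIGN_SIZE (8), so the adjusted
 * pointer normally equals the raw one, which is why the sample can free the aligned
 * pointer in bufferCompleted without trouble. Names below are ad hoc, not CUPTI symbols.
 */
#include <cstdio>
#include <cstdint>
#include <cstdlib>

static uint8_t *align_up(uint8_t *p, uintptr_t align) {
  uintptr_t rem = (uintptr_t)p & (align - 1);        /* same test as ALIGN_BUFFER */
  return rem ? p + (align - rem) : p;
}

int main() {
  const size_t buf_size = 8 * 1024, align_size = 8;  /* BUF_SIZE and ALIGN_SIZE above */
  uint8_t *raw = (uint8_t *)malloc(buf_size + align_size);
  uint8_t *buf = align_up(raw, align_size);
  printf("raw=%p aligned=%p offset=%zu\n", (void *)raw, (void *)buf, (size_t)(buf - raw));
  free(raw);
  return 0;
}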
2a9a2b29b571a8450d8fd5ad8c68364f93652970.cu
/* * Copyright 2014-2015 NVIDIA Corporation. All rights reserved. * * Sample CUPTI app to demonstrate the usage of unified memory counter profiling * */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cupti.h> #define CUPTI_CALL(call) \ do { \ CUptiResult _status = call; \ if (_status != CUPTI_SUCCESS) { \ const char *errstr; \ cuptiGetResultString(_status, &errstr); \ fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \ __FILE__, __LINE__, #call, errstr); \ exit(-1); \ } \ } while (0) #define DRIVER_API_CALL(apiFuncCall) \ do { \ CUresult _status = apiFuncCall; \ if (_status != CUDA_SUCCESS) { \ fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \ __FILE__, __LINE__, #apiFuncCall, _status); \ exit(-1); \ } \ } while (0) #define RUNTIME_API_CALL(apiFuncCall) \ do { \ cudaError_t _status = apiFuncCall; \ if (_status != cudaSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \ __FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status));\ exit(-1); \ } \ } while (0) #define BUF_SIZE (8 * 1024) #define ALIGN_SIZE (8) #define ALIGN_BUFFER(buffer, align) \ (((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer)) static const char * getUvmCounterKindString(CUpti_ActivityUnifiedMemoryCounterKind kind) { switch (kind) { case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD: return "BYTES_TRANSFER_HTOD"; case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH: return "BYTES_TRANSFER_DTOH"; default: break; } return "<unknown>"; } static void printActivity(CUpti_Activity *record) { switch (record->kind) { case CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER: { CUpti_ActivityUnifiedMemoryCounter2 *uvm = (CUpti_ActivityUnifiedMemoryCounter2 *)record; printf("UNIFIED_MEMORY_COUNTER [ %llu %llu ] kind=%s value=%llu src %u dst %u\n", (unsigned long long)(uvm->start), (unsigned long long)(uvm->end), getUvmCounterKindString(uvm->counterKind), (unsigned long long)uvm->value, uvm->srcId, uvm->dstId); break; } default: printf(" <unknown>\n"); break; } } static void CUPTIAPI bufferRequested(uint8_t **buffer, size_t *size, size_t *maxNumRecords) { uint8_t *rawBuffer; *size = BUF_SIZE; rawBuffer = (uint8_t *)malloc(*size + ALIGN_SIZE); *buffer = ALIGN_BUFFER(rawBuffer, ALIGN_SIZE); *maxNumRecords = 0; if (*buffer == NULL) { printf("Error: out of memory\n"); exit(-1); } } static void CUPTIAPI bufferCompleted(CUcontext ctx, uint32_t streamId, uint8_t *buffer, size_t size, size_t validSize) { CUptiResult status; CUpti_Activity *record = NULL; do { status = cuptiActivityGetNextRecord(buffer, validSize, &record); if (status == CUPTI_SUCCESS) { printActivity(record); } else if (status == CUPTI_ERROR_MAX_LIMIT_REACHED) { break; } else { CUPTI_CALL(status); } } while (1); // report any records dropped from the queue size_t dropped; CUPTI_CALL(cuptiActivityGetNumDroppedRecords(ctx, streamId, &dropped)); if (dropped != 0) { printf("Dropped %u activity records\n", (unsigned int)dropped); } free(buffer); } template<class T> __host__ __device__ void checkData(const char *loc, T *data, int size, int expectedVal) { int i; for (i = 0; i < size / (int)sizeof(T); i++) { if (data[i] != expectedVal) { printf("Mismatch found on %s\n", loc); printf("Address 0x%p, Observed = 0x%x Expected = 0x%x\n", data+i, data[i], expectedVal); break; } } } template<class T> __host__ __device__ void writeData(T *data, int size, int writeVal) { int i; for (i = 0; i < size / (int)sizeof(T); 
i++) { data[i] = writeVal; } } __global__ void testKernel(int *data, int size, int expectedVal) { checkData("GPU", data, size, expectedVal); writeData(data, size, -expectedVal); } int main(int argc, char **argv) { CUptiResult res; int deviceCount; int *data = NULL; int size = 64*1024; // 64 KB int i = 123; CUpti_ActivityUnifiedMemoryCounterConfig config[2]; DRIVER_API_CALL(cuInit(0)); DRIVER_API_CALL(cuDeviceGetCount(&deviceCount)); if (deviceCount == 0) { printf("There is no device supporting CUDA.\n"); exit(-1); } // register cupti activity buffer callbacks CUPTI_CALL(cuptiActivityRegisterCallbacks(bufferRequested, bufferCompleted)); // configure unified memory counters config[0].scope = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_PROCESS_SINGLE_DEVICE; config[0].kind = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD; config[0].deviceId = 0; config[0].enable = 1; config[1].scope = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_PROCESS_SINGLE_DEVICE; config[1].kind = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH; config[1].deviceId = 0; config[1].enable = 1; res = cuptiActivityConfigureUnifiedMemoryCounter(config, 2); if (res == CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED) { printf("Test is waived, unified memory is not supported on the underlying platform.\n"); return 0; } else if (res == CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_DEVICE) { printf("Test is waived, unified memory is not supported on the device.\n"); return 0; } else if (res == CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_NON_P2P_DEVICES) { printf("Test is waived, unified memory is not supported on the non-P2P multi-gpu setup.\n"); return 0; } else { CUPTI_CALL(res); } // enable unified memory counter activity CUPTI_CALL(cuptiActivityEnable(CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER)); // allocate unified memory printf("Allocation size in bytes %d\n", size); RUNTIME_API_CALL(cudaMallocManaged(&data, size)); // CPU access writeData(data, size, i); // kernel launch testKernel<<<1,1>>>(data, size, i); RUNTIME_API_CALL(cudaDeviceSynchronize()); // CPU access checkData("CPU", data, size, -i); // free unified memory RUNTIME_API_CALL(cudaFree(data)); CUPTI_CALL(cuptiActivityFlushAll(0)); // disable unified memory counter activity CUPTI_CALL(cuptiActivityDisable(CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER)); cudaDeviceReset(); return 0; }
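/*
 * Minimal sketch of the access pattern whose page migrations the sample's
 * BYTES_TRANSFER_HTOD / BYTES_TRANSFER_DTOH counters record: a CPU write to a managed
 * allocation, a first GPU touch (pages migrate host-to-device), then a CPU read (pages
 * migrate back). This mirrors the writeData/testKernel/checkData sequence above, minus
 * CUPTI; error handling is trimmed for brevity and the kernel name is hypothetical.
 */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void negate(int *p, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] = -p[i];
}

int main() {
  const int n = 64 * 1024 / (int)sizeof(int);        /* matches the sample's 64 KB */
  int *data = nullptr;
  cudaMallocManaged(&data, n * sizeof(int));
  for (int i = 0; i < n; ++i) data[i] = 123;         /* CPU touch: pages resident on host */
  negate<<<(n + 255) / 256, 256>>>(data, n);         /* GPU touch: pages migrate HtoD */
  cudaDeviceSynchronize();
  printf("data[0] after kernel = %d\n", data[0]);    /* CPU touch again: pages migrate DtoH */
  cudaFree(data);
  return 0;
}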
c44e1eab6a51ffe3491fff17c25f37125cc74aca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void innerToeplitzCyclicReductionKernel( double *a_d, double *b_d, double *c_d, double *d_d, double *k1_d, double *k2_d, const double b1, const double ai, const double bi, const double ci, const double bn) { /* */ __shared__ double d_l[{{shared_size | int}}]; int tix = threadIdx.x; int offset = blockIdx.x*({{n}}+2)+1; int i, j, k; int idx; double d_j, d_k; /* When loading to shared memory, perform the first reduction step */ idx = 0; if (tix == ({{(n/2) | int}}-1)) { d_l[tix] = d_d[offset+2*tix+1] - \ d_d[offset+2*tix]*k1_d[idx]; d_d[offset+{{n}}] = d_d[offset+{{n}}]/bn; } else { d_l[tix] = d_d[offset+2*tix+1] - \ d_d[offset+2*tix]*k1_d[idx] - \ d_d[offset+2*tix+2]*k2_d[idx]; } __syncthreads(); /* First step of reduction is complete and the coefficients are in shared memory */ /* Do the remaining forward reduction steps: */ for (int stride=2; stride<{{(n/2) | int}}; stride=stride*2) { idx = idx + 1; i = (stride-1) + tix*stride; if (tix < {{n}}/(2*stride)) { if (i == ({{n}}/2-1)) { d_l[i] = d_l[i] - \ d_l[i-stride/2]*k1_d[idx]; } else { d_l[i] = d_l[i] - d_l[i-stride/2]*k1_d[idx] - \ d_l[i+stride/2]*k2_d[idx]; } } __syncthreads(); } if (tix == 0) { j = rint(log2((float) {{(n/2) | int}})) - 1; k = rint(log2((float) {{(n/2) | int}})); d_j = (d_l[{{n}}/4-1]*b_d[k] - \ c_d[j]*d_l[{{n}}/2-1])/ \ (b_d[j]*b_d[k] - c_d[j]*a_d[k]); d_k = (b_d[j]*d_l[{{n}}/2-1] - \ d_l[{{n}}/4-1]*a_d[k])/ \ (b_d[j]*b_d[k] - c_d[j]*a_d[k]); d_l[{{n}}/4-1] = d_j; d_l[{{n}}/2-1] = d_k; } __syncthreads(); idx = rint(log2((float) {{n}}))-2; for (int stride={{n}}/4; stride>1; stride=stride/2) { idx = idx - 1; i = (stride/2-1) + tix*stride; if (tix < {{n}}/(2*stride)){ if (tix == 0) { d_l[i] = (d_l[i] - c_d[idx]*d_l[i+stride/2])/\ b_d[idx]; } else { d_l[i] = (d_l[i] - a_d[idx]*d_l[i-stride/2] -\ c_d[idx]*d_l[i+stride/2])/b_d[idx]; } } __syncthreads(); } //When writing from shared memory, perform the last //substitution step if (tix == 0) { d_d[offset+2*tix] = (d_d[offset+2*tix] - ci*d_l[tix])/bi; d_d[offset+2*tix+1] = d_l[tix]; d_d[offset-1] = d_d[offset-1]/b1; } else { d_d[offset+2*tix] = (d_d[offset+2*tix] - \ ai*d_l[tix-1] - ci*d_l[tix])/bi; d_d[offset+2*tix+1] = d_l[tix]; } __syncthreads(); } __global__ void boundaryCorrectionKernel( double *d_d, double *x_UH_i_d, double *x_LH_i_d, double *a_reduced_d, double *b_reduced_d, double *c_reduced_d, double *c2_reduced_d, const double x_LH_1, const double x_UH_N) { int tix = blockIdx.x*blockDim.x + threadIdx.x; double bmac; /* first thread of the block makes and solves the block's reduced system */ __shared__ double d_reduced_d[4]; if (threadIdx.x == 0) { d_reduced_d[0] = -d_d[tix]; d_reduced_d[1] = -d_d[tix+1]; d_reduced_d[2] = -d_d[tix+({{n}})]; d_reduced_d[3] = -d_d[tix+({{n}}+1)]; /* each thread solves its reduced system */ c2_reduced_d[0] = c_reduced_d[0]/b_reduced_d[0]; d_reduced_d[0] = d_reduced_d[0]/b_reduced_d[0]; for (int i=1; i<4; i++) { bmac = b_reduced_d[i] - a_reduced_d[i]*c2_reduced_d[i-1]; c2_reduced_d[i] = c_reduced_d[i]/bmac; d_reduced_d[i] = (d_reduced_d[i] - a_reduced_d[i]*d_reduced_d[i-1])/bmac; } for (int i=2; i >= 0; i--) { d_reduced_d[i] = d_reduced_d[i] - c2_reduced_d[i]*d_reduced_d[i+1]; } } __syncthreads(); /* with the reduced solution, each thread computes the true solution */ if (threadIdx.x == 0) { d_d[tix] = d_d[tix] + x_LH_1*d_reduced_d[0]; } else if (threadIdx.x == ({{n}}+1)) { d_d[tix] = d_d[tix] + x_UH_N*d_reduced_d[3]; } else { d_d[tix] = 
d_d[tix] + x_UH_i_d[threadIdx.x-1]*d_reduced_d[1] + x_LH_i_d[threadIdx.x-1]*d_reduced_d[2]; } }
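The serial loop that boundaryCorrectionKernel runs in its first thread is the classic Thomas algorithm applied to the 4-unknown reduced interface system (forward elimination with a scratch superdiagonal, then back substitution). As a reference, here is a minimal host-side C++ sketch of that same pattern for a general tridiagonal system; the function and variable names are illustrative and not part of the original source, which hard-codes n = 4 and uses c2_reduced_d as the scratch array.

// Minimal host-side sketch (not from the original source) of the Thomas
// algorithm mirrored by boundaryCorrectionKernel: forward elimination with a
// scratch superdiagonal c2[], then back substitution. In the kernel above,
// n == 4 and d[] holds the negated interface values of each block.
#include <vector>

void thomas_solve(const std::vector<double>& a,   // subdiagonal, a[0] unused
                  const std::vector<double>& b,   // main diagonal
                  const std::vector<double>& c,   // superdiagonal, c[n-1] unused
                  std::vector<double>& d)         // RHS, overwritten with the solution
{
    const int n = static_cast<int>(d.size());
    std::vector<double> c2(n);                    // scratch superdiagonal (c2_reduced_d in the kernel)
    c2[0] = c[0] / b[0];
    d[0]  = d[0] / b[0];
    for (int i = 1; i < n; ++i) {
        double bmac = b[i] - a[i] * c2[i - 1];    // modified pivot, as in the kernel
        c2[i] = c[i] / bmac;
        d[i]  = (d[i] - a[i] * d[i - 1]) / bmac;
    }
    for (int i = n - 2; i >= 0; --i) {
        d[i] -= c2[i] * d[i + 1];                 // back substitution
    }
}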
c44e1eab6a51ffe3491fff17c25f37125cc74aca.cu
__global__ void innerToeplitzCyclicReductionKernel( double *a_d, double *b_d, double *c_d, double *d_d, double *k1_d, double *k2_d, const double b1, const double ai, const double bi, const double ci, const double bn) { /* */ __shared__ double d_l[{{shared_size | int}}]; int tix = threadIdx.x; int offset = blockIdx.x*({{n}}+2)+1; int i, j, k; int idx; double d_j, d_k; /* When loading to shared memory, perform the first reduction step */ idx = 0; if (tix == ({{(n/2) | int}}-1)) { d_l[tix] = d_d[offset+2*tix+1] - \ d_d[offset+2*tix]*k1_d[idx]; d_d[offset+{{n}}] = d_d[offset+{{n}}]/bn; } else { d_l[tix] = d_d[offset+2*tix+1] - \ d_d[offset+2*tix]*k1_d[idx] - \ d_d[offset+2*tix+2]*k2_d[idx]; } __syncthreads(); /* First step of reduction is complete and the coefficients are in shared memory */ /* Do the remaining forward reduction steps: */ for (int stride=2; stride<{{(n/2) | int}}; stride=stride*2) { idx = idx + 1; i = (stride-1) + tix*stride; if (tix < {{n}}/(2*stride)) { if (i == ({{n}}/2-1)) { d_l[i] = d_l[i] - \ d_l[i-stride/2]*k1_d[idx]; } else { d_l[i] = d_l[i] - d_l[i-stride/2]*k1_d[idx] - \ d_l[i+stride/2]*k2_d[idx]; } } __syncthreads(); } if (tix == 0) { j = rint(log2((float) {{(n/2) | int}})) - 1; k = rint(log2((float) {{(n/2) | int}})); d_j = (d_l[{{n}}/4-1]*b_d[k] - \ c_d[j]*d_l[{{n}}/2-1])/ \ (b_d[j]*b_d[k] - c_d[j]*a_d[k]); d_k = (b_d[j]*d_l[{{n}}/2-1] - \ d_l[{{n}}/4-1]*a_d[k])/ \ (b_d[j]*b_d[k] - c_d[j]*a_d[k]); d_l[{{n}}/4-1] = d_j; d_l[{{n}}/2-1] = d_k; } __syncthreads(); idx = rint(log2((float) {{n}}))-2; for (int stride={{n}}/4; stride>1; stride=stride/2) { idx = idx - 1; i = (stride/2-1) + tix*stride; if (tix < {{n}}/(2*stride)){ if (tix == 0) { d_l[i] = (d_l[i] - c_d[idx]*d_l[i+stride/2])/\ b_d[idx]; } else { d_l[i] = (d_l[i] - a_d[idx]*d_l[i-stride/2] -\ c_d[idx]*d_l[i+stride/2])/b_d[idx]; } } __syncthreads(); } //When writing from shared memory, perform the last //substitution step if (tix == 0) { d_d[offset+2*tix] = (d_d[offset+2*tix] - ci*d_l[tix])/bi; d_d[offset+2*tix+1] = d_l[tix]; d_d[offset-1] = d_d[offset-1]/b1; } else { d_d[offset+2*tix] = (d_d[offset+2*tix] - \ ai*d_l[tix-1] - ci*d_l[tix])/bi; d_d[offset+2*tix+1] = d_l[tix]; } __syncthreads(); } __global__ void boundaryCorrectionKernel( double *d_d, double *x_UH_i_d, double *x_LH_i_d, double *a_reduced_d, double *b_reduced_d, double *c_reduced_d, double *c2_reduced_d, const double x_LH_1, const double x_UH_N) { int tix = blockIdx.x*blockDim.x + threadIdx.x; double bmac; /* first thread of the block makes and solves the block's reduced system */ __shared__ double d_reduced_d[4]; if (threadIdx.x == 0) { d_reduced_d[0] = -d_d[tix]; d_reduced_d[1] = -d_d[tix+1]; d_reduced_d[2] = -d_d[tix+({{n}})]; d_reduced_d[3] = -d_d[tix+({{n}}+1)]; /* each thread solves its reduced system */ c2_reduced_d[0] = c_reduced_d[0]/b_reduced_d[0]; d_reduced_d[0] = d_reduced_d[0]/b_reduced_d[0]; for (int i=1; i<4; i++) { bmac = b_reduced_d[i] - a_reduced_d[i]*c2_reduced_d[i-1]; c2_reduced_d[i] = c_reduced_d[i]/bmac; d_reduced_d[i] = (d_reduced_d[i] - a_reduced_d[i]*d_reduced_d[i-1])/bmac; } for (int i=2; i >= 0; i--) { d_reduced_d[i] = d_reduced_d[i] - c2_reduced_d[i]*d_reduced_d[i+1]; } } __syncthreads(); /* with the reduced solution, each thread computes the true solution */ if (threadIdx.x == 0) { d_d[tix] = d_d[tix] + x_LH_1*d_reduced_d[0]; } else if (threadIdx.x == ({{n}}+1)) { d_d[tix] = d_d[tix] + x_UH_N*d_reduced_d[3]; } else { d_d[tix] = d_d[tix] + x_UH_i_d[threadIdx.x-1]*d_reduced_d[1] + x_LH_i_d[threadIdx.x-1]*d_reduced_d[2]; } 
}
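The two kernels above are Jinja2 templates: {{n}}, {{(n/2) | int}} and {{shared_size | int}} are rendered to compile-time constants before the source is handed to the compiler. Below is a hypothetical host-side launch sketch, assuming the placeholders were rendered with n = SYS_N; the grid and block shapes are inferred from the indexing in the kernels (one block per tridiagonal system, n/2 threads for the cyclic-reduction kernel, n+2 threads for the boundary correction), and all names here are illustrative rather than taken from the original driver code.

// Hypothetical launch sketch (not from the original source). Assumes the
// Jinja2 placeholders were rendered with n = SYS_N and shared_size = SYS_N/2
// before compilation. Thread counts follow the kernel indexing above:
// innerToeplitzCyclicReductionKernel uses tix in [0, n/2), and
// boundaryCorrectionKernel tests threadIdx.x against n+1, i.e. n+2 threads.
#define SYS_N 64   // per-system size; assumed to be a power of two

void solve_all_systems(int numSystems,
                       double *a_d, double *b_d, double *c_d, double *d_d,
                       double *k1_d, double *k2_d,
                       double b1, double ai, double bi, double ci, double bn,
                       double *x_UH_i_d, double *x_LH_i_d,
                       double *a_r, double *b_r, double *c_r, double *c2_r,
                       double x_LH_1, double x_UH_N)
{
    // One block per tridiagonal system; SYS_N/2 threads do the in-block
    // cyclic reduction against the Toeplitz interior coefficients.
    innerToeplitzCyclicReductionKernel<<<numSystems, SYS_N / 2>>>(
        a_d, b_d, c_d, d_d, k1_d, k2_d, b1, ai, bi, ci, bn);

    // SYS_N+2 threads per system apply the boundary (interface) correction.
    boundaryCorrectionKernel<<<numSystems, SYS_N + 2>>>(
        d_d, x_UH_i_d, x_LH_i_d, a_r, b_r, c_r, c2_r, x_LH_1, x_UH_N);
}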
fa7fb3bbf4375f8d8f5cf39ce5c017aa864cbb10.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include <vector> #include <hip/hip_runtime.h> #include "opencv2/cudev.hpp" #include "opencv2/cudalegacy/NPP_staging.hpp" texture<Ncv8u, 1, hipReadModeElementType> tex8u; texture<Ncv32u, 1, hipReadModeElementType> tex32u; texture<uint2, 1, hipReadModeElementType> tex64u; //============================================================================== // // CUDA streams handling // //============================================================================== static hipStream_t nppStream = 0; hipStream_t nppStGetActiveCUDAstream(void) { return nppStream; } hipStream_t nppStSetActiveCUDAstream(hipStream_t cudaStream) { hipStream_t tmp = nppStream; nppStream = cudaStream; return tmp; } //============================================================================== // // IntegralImage.cu // //============================================================================== const Ncv32u NUM_SCAN_THREADS = 256; const Ncv32u LOG2_NUM_SCAN_THREADS = 8; template<class T_in, class T_out> struct _scanElemOp { template<bool tbDoSqr> static inline __host__ __device__ T_out scanElemOp(T_in elem) { return scanElemOp( elem, Int2Type<(int)tbDoSqr>() ); } private: template <int v> struct Int2Type { enum { value = v }; }; static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<0>) { return (T_out)elem; } static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<1>) { return (T_out)(elem*elem); } }; template<class T> inline __device__ T readElem(T *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs); template<> inline __device__ Ncv8u readElem<Ncv8u>(Ncv8u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return tex1Dfetch(tex8u, texOffs + srcStride * blockIdx.x + curElemOffs); } template<> inline __device__ Ncv32u readElem<Ncv32u>(Ncv32u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return d_src[curElemOffs]; } template<> inline __device__ Ncv32f readElem<Ncv32f>(Ncv32f *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return d_src[curElemOffs]; } /** * \brief Segmented scan kernel * * Calculates per-row prefix scans of the input image. 
* Out-of-bounds safe: reads 'size' elements, writes 'size+1' elements * * \tparam T_in Type of input image elements * \tparam T_out Type of output image elements * \tparam T_op Defines an operation to be performed on the input image pixels * * \param d_src [IN] Source image pointer * \param srcWidth [IN] Source image width * \param srcStride [IN] Source image stride * \param d_II [OUT] Output image pointer * \param IIstride [IN] Output image stride * * \return None */ template <class T_in, class T_out, bool tbDoSqr> __global__ void scanRows(T_in *d_src, Ncv32u texOffs, Ncv32u srcWidth, Ncv32u srcStride, T_out *d_II, Ncv32u IIstride) { //advance pointers to the current line if (sizeof(T_in) != 1) { d_src += srcStride * blockIdx.x; } //for initial image 8bit source we use texref tex8u d_II += IIstride * blockIdx.x; Ncv32u numBuckets = (srcWidth + NUM_SCAN_THREADS - 1) >> LOG2_NUM_SCAN_THREADS; Ncv32u offsetX = 0; __shared__ T_out shmem[NUM_SCAN_THREADS]; __shared__ T_out carryElem; carryElem = 0; __syncthreads(); while (numBuckets--) { Ncv32u curElemOffs = offsetX + threadIdx.x; T_out curScanElem; T_in curElem; T_out curElemMod; if (curElemOffs < srcWidth) { //load elements curElem = readElem<T_in>(d_src, texOffs, srcStride, curElemOffs); } curElemMod = _scanElemOp<T_in, T_out>::scanElemOp<tbDoSqr>(curElem); //inclusive scan curScanElem = cv::cudev::blockScanInclusive<NUM_SCAN_THREADS>(curElemMod, shmem, threadIdx.x); if (curElemOffs <= srcWidth) { //make scan exclusive and write the bucket to the output buffer d_II[curElemOffs] = carryElem + curScanElem - curElemMod; offsetX += NUM_SCAN_THREADS; } //remember last element for subsequent buckets adjustment __syncthreads(); if (threadIdx.x == NUM_SCAN_THREADS-1) { carryElem += curScanElem; } __syncthreads(); } if (offsetX == srcWidth && !threadIdx.x) { d_II[offsetX] = carryElem; } } template <bool tbDoSqr, class T_in, class T_out> NCVStatus scanRowsWrapperDevice(T_in *d_src, Ncv32u srcStride, T_out *d_dst, Ncv32u dstStride, NcvSize32u roi) { hipChannelFormatDesc cfdTex; size_t alignmentOffset = 0; if (sizeof(T_in) == 1) { cfdTex = hipCreateChannelDesc<Ncv8u>(); ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); if (alignmentOffset > 0) { ncvAssertCUDAReturn(hipUnbindTexture(tex8u), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, alignmentOffset + roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); } } hipLaunchKernelGGL(( scanRows <T_in, T_out, tbDoSqr>) , dim3(roi.height), dim3(NUM_SCAN_THREADS), 0, nppStGetActiveCUDAstream(), d_src, (Ncv32u)alignmentOffset, roi.width, srcStride, d_dst, dstStride); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } static Ncv32u getPaddedDimension(Ncv32u dim, Ncv32u elemTypeSize, Ncv32u allocatorAlignment) { Ncv32u alignMask = allocatorAlignment-1; Ncv32u inverseAlignMask = ~alignMask; Ncv32u dimBytes = dim * elemTypeSize; Ncv32u pitch = (dimBytes + alignMask) & inverseAlignMask; Ncv32u PaddedDim = pitch / elemTypeSize; return PaddedDim; } template <class T_in, class T_out> NCVStatus ncvIntegralImage_device(T_in *d_src, Ncv32u srcStep, T_out *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(sizeof(T_out) == sizeof(Ncv32u), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); 
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width * sizeof(T_in) && dstStep >= (roi.width + 1) * sizeof(T_out) && srcStep % sizeof(T_in) == 0 && dstStep % sizeof(T_out) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T_in); dstStep /= sizeof(T_out); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); NCVMatrixAlloc<T_out> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<T_out> Tmp32_2(gpuAllocator, PaddedHeightII32, PaddedWidthII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_2.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(Tmp32_1.pitch() * Tmp32_1.height() == Tmp32_2.pitch() * Tmp32_2.height(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <false> (d_src, srcStep, Tmp32_1.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedWidthII32*sizeof(Ncv32u), (Ncv32u *)Tmp32_2.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false> (Tmp32_2.ptr(), PaddedHeightII32, Tmp32_1.ptr(), PaddedHeightII32, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), (Ncv32u *)d_dst, dstStep*sizeof(Ncv32u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus ncvSquaredIntegralImage_device(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width && dstStep >= (roi.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedWidthII64 = getPaddedDimension(WidthII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedHeightII64 = getPaddedDimension(HeightII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedWidthMax = PaddedWidthII32 > PaddedWidthII64 ? PaddedWidthII32 : PaddedWidthII64; Ncv32u PaddedHeightMax = PaddedHeightII32 > PaddedHeightII64 ? 
PaddedHeightII32 : PaddedHeightII64; NCVMatrixAlloc<Ncv32u> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<Ncv64u> Tmp64(gpuAllocator, PaddedWidthMax, PaddedHeightMax); ncvAssertReturn(Tmp64.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv32u> Tmp32_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv64u> Tmp64_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII64, PaddedHeightII64); ncvAssertReturn(Tmp64_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <true, Ncv8u, Ncv32u> (d_src, srcStep, Tmp32_2.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R(Tmp32_2.ptr(), PaddedWidthII32*sizeof(Ncv32u), Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false, Ncv32u, Ncv64u> (Tmp32_1.ptr(), PaddedHeightII32, Tmp64_2.ptr(), PaddedHeightII64, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_64u_C1R(Tmp64_2.ptr(), PaddedHeightII64*sizeof(Ncv64u), d_dst, dstStep*sizeof(Ncv64u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_8u32u(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv8u*)NULL, roiSize.width, (Ncv32u*)NULL, (roiSize.width+1) * sizeof(Ncv32u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_32f32f(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv32f*)NULL, roiSize.width * sizeof(Ncv32f), (Ncv32f*)NULL, (roiSize.width+1) * sizeof(Ncv32f), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegralGetSize_8u64u(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(NULL, roiSize.width, NULL, (roiSize.width+1) * sizeof(Ncv64u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv32u *d_dst, Ncv32u dstStep, NcvSize32u 
roiSize, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_32f32f_C1R(Ncv32f *d_src, Ncv32u srcStep, Ncv32f *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv32u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv32u) && dstStep % sizeof(Ncv32u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv32u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv32u top = h_dst[(i-1) * dstStep + j]; Ncv32u left = h_dst[i * dstStep + (j - 1)]; Ncv32u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus nppiStIntegral_32f32f_C1R_host(Ncv32f *h_src, Ncv32u srcStep, Ncv32f *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width * sizeof(Ncv32f) && dstStep >= (roiSize.width + 1) * sizeof(Ncv32f) && srcStep % sizeof(Ncv32f) == 0 && dstStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(Ncv32f); dstStep /= sizeof(Ncv32f); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0.0f; for (Ncv32u j=1; j<WidthII; j++) { Ncv32f top = h_dst[(i-1) * dstStep + j]; Ncv32f left = h_dst[i * dstStep + (j - 1)]; Ncv32f topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32f elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv64u *h_dst, Ncv32u dstStep, NcvSize32u 
roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv64u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv64u top = h_dst[(i-1) * dstStep + j]; Ncv64u left = h_dst[i * dstStep + (j - 1)]; Ncv64u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv64u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem*elem + left - topleft + top; } } return NPPST_SUCCESS; } //============================================================================== // // Decimate.cu // //============================================================================== const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_X = 32; const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_Y = 8; template<class T, NcvBool tbCacheTexture> __device__ T getElem_Decimate(Ncv32u x, T *d_src); template<> __device__ Ncv32u getElem_Decimate<Ncv32u, true>(Ncv32u x, Ncv32u *d_src) { return tex1Dfetch(tex32u, x); } template<> __device__ Ncv32u getElem_Decimate<Ncv32u, false>(Ncv32u x, Ncv32u *d_src) { return d_src[x]; } template<> __device__ Ncv64u getElem_Decimate<Ncv64u, true>(Ncv32u x, Ncv64u *d_src) { uint2 tmp = tex1Dfetch(tex64u, x); Ncv64u res = (Ncv64u)tmp.y; res <<= 32; res |= tmp.x; return res; } template<> __device__ Ncv64u getElem_Decimate<Ncv64u, false>(Ncv32u x, Ncv64u *d_src) { return d_src[x]; } template <class T, NcvBool tbCacheTexture> __global__ void decimate_C1R(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep, NcvSize32u dstRoi, Ncv32u scale) { int curX = blockIdx.x * blockDim.x + threadIdx.x; int curY = blockIdx.y * blockDim.y + threadIdx.y; if (curX >= dstRoi.width || curY >= dstRoi.height) { return; } d_dst[curY * dstStep + curX] = getElem_Decimate<T, tbCacheTexture>((curY * srcStep + curX) * scale, d_src); } template <class T> static NCVStatus decimateWrapperDevice(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep, NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) { ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE); ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) && dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale), NPPST_INVALID_STEP); srcStep /= sizeof(T); dstStep /= sizeof(T); NcvSize32u dstRoi; dstRoi.width = srcRoi.width / scale; dstRoi.height = srcRoi.height / scale; dim3 grid((dstRoi.width + NUM_DOWNSAMPLE_NEAREST_THREADS_X - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_X, (dstRoi.height + NUM_DOWNSAMPLE_NEAREST_THREADS_Y - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_Y); dim3 block(NUM_DOWNSAMPLE_NEAREST_THREADS_X, NUM_DOWNSAMPLE_NEAREST_THREADS_Y); if (!readThruTexture) { hipLaunchKernelGGL(( decimate_C1R <T, false>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcStep, d_dst, dstStep, dstRoi, scale); } else { hipChannelFormatDesc cfdTexSrc; if (sizeof(T) == sizeof(Ncv32u)) { cfdTexSrc = hipCreateChannelDesc<Ncv32u>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex32u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR); 
ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); } else { cfdTexSrc = hipCreateChannelDesc<uint2>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex64u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); } hipLaunchKernelGGL(( decimate_C1R <T, true>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcStep, d_dst, dstStep, dstRoi, scale); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } template <class T> static NCVStatus decimateWrapperHost(T *h_src, Ncv32u srcStep, T *h_dst, Ncv32u dstStep, NcvSize32u srcRoi, Ncv32u scale) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width != 0 && srcRoi.height != 0, NPPST_INVALID_ROI); ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE); ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) && dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale) && srcStep % sizeof(T) == 0 && dstStep % sizeof(T) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T); dstStep /= sizeof(T); NcvSize32u dstRoi; dstRoi.width = srcRoi.width / scale; dstRoi.height = srcRoi.height / scale; for (Ncv32u i=0; i<dstRoi.height; i++) { for (Ncv32u j=0; j<dstRoi.width; j++) { h_dst[i*dstStep+j] = h_src[i*scale*srcStep + j*scale]; } } return NPPST_SUCCESS; } #define implementNppDecimate(bit, typ) \ NCVStatus nppiStDecimate_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \ Ncv##bit##typ *d_dst, Ncv32u dstStep, \ NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) \ { \ return decimateWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \ (Ncv##bit##u *)d_dst, dstStep, \ srcRoi, scale, readThruTexture); \ } #define implementNppDecimateHost(bit, typ) \ NCVStatus nppiStDecimate_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \ Ncv##bit##typ *h_dst, Ncv32u dstStep, \ NcvSize32u srcRoi, Ncv32u scale) \ { \ return decimateWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \ (Ncv##bit##u *)h_dst, dstStep, \ srcRoi, scale); \ } implementNppDecimate(32, u) implementNppDecimate(32, s) implementNppDecimate(32, f) implementNppDecimate(64, u) implementNppDecimate(64, s) implementNppDecimate(64, f) implementNppDecimateHost(32, u) implementNppDecimateHost(32, s) implementNppDecimateHost(32, f) implementNppDecimateHost(64, u) implementNppDecimateHost(64, s) implementNppDecimateHost(64, f) //============================================================================== // // RectStdDev.cu // //============================================================================== const Ncv32u NUM_RECTSTDDEV_THREADS = 128; template <NcvBool tbCacheTexture> __device__ Ncv32u getElemSum(Ncv32u x, Ncv32u *d_sum) { if (tbCacheTexture) { return tex1Dfetch(tex32u, x); } else { return d_sum[x]; } } template <NcvBool tbCacheTexture> __device__ Ncv64u getElemSqSum(Ncv32u x, Ncv64u *d_sqsum) { if (tbCacheTexture) { uint2 tmp = tex1Dfetch(tex64u, x); Ncv64u res = (Ncv64u)tmp.y; res <<= 32; res |= tmp.x; return res; } else { return d_sqsum[x]; } } template <NcvBool tbCacheTexture> __global__ void rectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep, Ncv64u *d_sqsum, Ncv32u sqsumStep, Ncv32f *d_norm, Ncv32u normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f invRectArea) { Ncv32u x_offs = blockIdx.x * NUM_RECTSTDDEV_THREADS + threadIdx.x; if (x_offs >= roi.width) { return; } Ncv32u sum_offset = blockIdx.y * sumStep + x_offs; Ncv32u sqsum_offset = 
blockIdx.y * sqsumStep + x_offs; //OPT: try swapping order (could change cache hit/miss ratio) Ncv32u sum_tl = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x, d_sum); Ncv32u sum_bl = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x, d_sum); Ncv32u sum_tr = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x + rect.width, d_sum); Ncv32u sum_br = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width, d_sum); Ncv32u sum_val = sum_br + sum_tl - sum_tr - sum_bl; Ncv64u sqsum_tl, sqsum_bl, sqsum_tr, sqsum_br; sqsum_tl = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x, d_sqsum); sqsum_bl = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x, d_sqsum); sqsum_tr = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x + rect.width, d_sqsum); sqsum_br = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width, d_sqsum); Ncv64u sqsum_val = sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl; Ncv32f mean = sum_val * invRectArea; ////////////////////////////////////////////////////////////////////////// // sqsum_val_res = sqsum_val / rectArea ////////////////////////////////////////////////////////////////////////// Ncv32f sqsum_val_1 = __ull2float_rz(sqsum_val); Ncv64u sqsum_val_2 = __float2ull_rz(sqsum_val_1); Ncv64u sqsum_val_3 = sqsum_val - sqsum_val_2; Ncv32f sqsum_val_4 = __ull2float_rn(sqsum_val_3); sqsum_val_1 *= invRectArea; sqsum_val_4 *= invRectArea; Ncv32f sqsum_val_res = sqsum_val_1 + sqsum_val_4; ////////////////////////////////////////////////////////////////////////// // variance = sqsum_val_res - mean * mean ////////////////////////////////////////////////////////////////////////// #if defined DISABLE_MAD_SELECTIVELY Ncv32f variance = sqsum_val_2 - __fmul_rn(mean, mean); #else Ncv32f variance = sqsum_val_res - mean * mean; #endif ////////////////////////////////////////////////////////////////////////// // stddev = sqrtf(variance) ////////////////////////////////////////////////////////////////////////// //Ncv32f stddev = sqrtf(variance); Ncv32f stddev = __fsqrt_rn(variance); d_norm[blockIdx.y * normStep + x_offs] = stddev; } NCVStatus nppiStRectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep, Ncv64u *d_sqsum, Ncv32u sqsumStep, Ncv32f *d_norm, Ncv32u normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f scaleArea, NcvBool readThruTexture) { ncvAssertReturn(d_sum != NULL && d_sqsum != NULL && d_norm != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) && sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) && normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) && sumStep % sizeof(Ncv32u) == 0 && sqsumStep % sizeof(Ncv64u) == 0 && normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE); sumStep /= sizeof(Ncv32u); sqsumStep /= sizeof(Ncv64u); normStep /= sizeof(Ncv32f); Ncv32f rectArea = rect.width * rect.height * scaleArea; Ncv32f invRectArea = 1.0f / rectArea; dim3 grid(((roi.width + NUM_RECTSTDDEV_THREADS - 1) / NUM_RECTSTDDEV_THREADS), roi.height); dim3 block(NUM_RECTSTDDEV_THREADS); if (!readThruTexture) { hipLaunchKernelGGL(( rectStdDev_32f_C1R <false>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_sum, sumStep, d_sqsum, sqsumStep, d_norm, normStep, 
roi, rect, invRectArea); } else { hipChannelFormatDesc cfdTexSrc; hipChannelFormatDesc cfdTexSqr; cfdTexSrc = hipCreateChannelDesc<Ncv32u>(); cfdTexSqr = hipCreateChannelDesc<uint2>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex32u, d_sum, cfdTexSrc, (roi.height + rect.y + rect.height) * sumStep * sizeof(Ncv32u)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex64u, d_sqsum, cfdTexSqr, (roi.height + rect.y + rect.height) * sqsumStep * sizeof(Ncv64u)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); hipLaunchKernelGGL(( rectStdDev_32f_C1R <true>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), NULL, sumStep, NULL, sqsumStep, d_norm, normStep, roi, rect, invRectArea); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStRectStdDev_32f_C1R_host(Ncv32u *h_sum, Ncv32u sumStep, Ncv64u *h_sqsum, Ncv32u sqsumStep, Ncv32f *h_norm, Ncv32u normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f scaleArea) { ncvAssertReturn(h_sum != NULL && h_sqsum != NULL && h_norm != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) && sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) && normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) && sumStep % sizeof(Ncv32u) == 0 && sqsumStep % sizeof(Ncv64u) == 0 && normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE); sumStep /= sizeof(Ncv32u); sqsumStep /= sizeof(Ncv64u); normStep /= sizeof(Ncv32f); Ncv32f rectArea = rect.width * rect.height * scaleArea; Ncv32f invRectArea = 1.0f / rectArea; for (Ncv32u i=0; i<roi.height; i++) { for (Ncv32u j=0; j<roi.width; j++) { Ncv32u sum_offset = i * sumStep + j; Ncv32u sqsum_offset = i * sqsumStep + j; Ncv32u sum_tl = h_sum[sum_offset + rect.y * sumStep + rect.x]; Ncv32u sum_bl = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x]; Ncv32u sum_tr = h_sum[sum_offset + rect.y * sumStep + rect.x + rect.width]; Ncv32u sum_br = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width]; Ncv64f sum_val = sum_br + sum_tl - sum_tr - sum_bl; Ncv64u sqsum_tl = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x]; Ncv64u sqsum_bl = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x]; Ncv64u sqsum_tr = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x + rect.width]; Ncv64u sqsum_br = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width]; Ncv64f sqsum_val = (Ncv64f)(sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl); Ncv64f mean = sum_val * invRectArea; Ncv64f sqsum_val_2 = sqsum_val / rectArea; Ncv64f variance = sqsum_val_2 - mean * mean; h_norm[i * normStep + j] = (Ncv32f)sqrt(variance); } } return NPPST_SUCCESS; } //============================================================================== // // Transpose.cu // //============================================================================== const Ncv32u TRANSPOSE_TILE_DIM = 16; const Ncv32u TRANSPOSE_BLOCK_ROWS = 16; /** * \brief Matrix transpose kernel * * Calculates transpose of the input image * \see TRANSPOSE_TILE_DIM * * \tparam T_in Type of input image elements * \tparam T_out Type of output image elements * * \param d_src [IN] Source image pointer * \param srcStride [IN] Source 
image stride * \param d_dst [OUT] Output image pointer * \param dstStride [IN] Output image stride * * \return None */ template <class T> __global__ void transpose(T *d_src, Ncv32u srcStride, T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi) { __shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1]; Ncv32u blockIdx_x, blockIdx_y; // do diagonal reordering if (gridDim.x == gridDim.y) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x; } else { Ncv32u bid = blockIdx.x + gridDim.x * blockIdx.y; blockIdx_y = bid % gridDim.y; blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x; } Ncv32u xIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.x; Ncv32u yIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.y; Ncv32u index_gmem = xIndex + yIndex * srcStride; if (xIndex < srcRoi.width) { for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS) { if (yIndex + i < srcRoi.height) { tile[threadIdx.y+i][threadIdx.x] = d_src[index_gmem+i*srcStride]; } } } __syncthreads(); xIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.x; yIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.y; index_gmem = xIndex + yIndex * dstStride; if (xIndex < srcRoi.height) { for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS) { if (yIndex + i < srcRoi.width) { d_dst[index_gmem+i*dstStride] = tile[threadIdx.x][threadIdx.y+i]; } } } } template <class T> NCVStatus transposeWrapperDevice(T *d_src, Ncv32u srcStride, T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi) { ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) && dstStride >= srcRoi.height * sizeof(T) && srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP); srcStride /= sizeof(T); dstStride /= sizeof(T); dim3 grid((srcRoi.width + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM, (srcRoi.height + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM); dim3 block(TRANSPOSE_TILE_DIM, TRANSPOSE_TILE_DIM); hipLaunchKernelGGL(( transpose <T>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcStride, d_dst, dstStride, srcRoi); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } template <class T> static NCVStatus transposeWrapperHost(T *h_src, Ncv32u srcStride, T *h_dst, Ncv32u dstStride, NcvSize32u srcRoi) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) && dstStride >= srcRoi.height * sizeof(T) && srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP); srcStride /= sizeof(T); dstStride /= sizeof(T); for (Ncv32u i=0; i<srcRoi.height; i++) { for (Ncv32u j=0; j<srcRoi.width; j++) { h_dst[j*dstStride+i] = h_src[i*srcStride + j]; } } return NPPST_SUCCESS; } #define implementNppTranspose(bit, typ) \ NCVStatus nppiStTranspose_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \ Ncv##bit##typ *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) \ { \ return transposeWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \ (Ncv##bit##u *)d_dst, dstStep, srcRoi); \ } #define implementNppTransposeHost(bit, typ) \ NCVStatus nppiStTranspose_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \ Ncv##bit##typ *h_dst, Ncv32u dstStep, \ NcvSize32u srcRoi) \ { \ return transposeWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \ (Ncv##bit##u 
*)h_dst, dstStep, srcRoi); \ } implementNppTranspose(32,u) implementNppTranspose(32,s) implementNppTranspose(32,f) implementNppTranspose(64,u) implementNppTranspose(64,s) implementNppTranspose(64,f) implementNppTransposeHost(32,u) implementNppTransposeHost(32,s) implementNppTransposeHost(32,f) implementNppTransposeHost(64,u) implementNppTransposeHost(64,s) implementNppTransposeHost(64,f) NCVStatus nppiStTranspose_128_C1R(void *d_src, Ncv32u srcStep, void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) { return transposeWrapperDevice<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi); } NCVStatus nppiStTranspose_128_C1R_host(void *d_src, Ncv32u srcStep, void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) { return transposeWrapperHost<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi); } //============================================================================== // // Compact.cu // //============================================================================== const Ncv32u NUM_REMOVE_THREADS = 256; template <bool bRemove, bool bWritePartial> __global__ void removePass1Scan(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_offsets, Ncv32u *d_blockSums, Ncv32u elemRemove) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn > srcLen + blockDim.x) { return; } __shared__ Ncv32u shmem[NUM_REMOVE_THREADS]; Ncv32u scanElem = 0; if (elemAddrIn < srcLen) { if (bRemove) { scanElem = (d_src[elemAddrIn] != elemRemove) ? 1 : 0; } else { scanElem = d_src[elemAddrIn]; } } Ncv32u localScanInc = cv::cudev::blockScanInclusive<NUM_REMOVE_THREADS>(scanElem, shmem, threadIdx.x); __syncthreads(); if (elemAddrIn < srcLen) { if (threadIdx.x == NUM_REMOVE_THREADS-1 && bWritePartial) { d_blockSums[blockId] = localScanInc; } if (bRemove) { d_offsets[elemAddrIn] = localScanInc - scanElem; } else { d_src[elemAddrIn] = localScanInc - scanElem; } } } __global__ void removePass2Adjust(Ncv32u *d_offsets, Ncv32u srcLen, Ncv32u *d_blockSums) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn >= srcLen) { return; } __shared__ Ncv32u valOffs; valOffs = d_blockSums[blockId]; __syncthreads(); d_offsets[elemAddrIn] += valOffs; } __global__ void removePass3Compact(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_offsets, Ncv32u *d_dst, Ncv32u elemRemove, Ncv32u *dstLenValue) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn >= srcLen) { return; } Ncv32u elem = d_src[elemAddrIn]; Ncv32u elemAddrOut = d_offsets[elemAddrIn]; if (elem != elemRemove) { d_dst[elemAddrOut] = elem; } if (elemAddrIn == srcLen-1) { if (elem != elemRemove) { *dstLenValue = elemAddrOut + 1; } else { *dstLenValue = elemAddrOut; } } } NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_dst, Ncv32u *dstLenPinned, Ncv32u elemRemove, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { if (dstLenPinned != NULL) { *dstLenPinned = 0; } return NPPST_SUCCESS; } std::vector<Ncv32u> partSumNums; std::vector<Ncv32u> partSumOffsets; Ncv32u partSumLastNum = srcLen; Ncv32u partSumLastOffs = 0; do { partSumNums.push_back(partSumLastNum); partSumOffsets.push_back(partSumLastOffs); Ncv32u curPartSumAlignedLength = alignUp(partSumLastNum * 
sizeof(Ncv32u), gpuAllocator.alignment()) / sizeof(Ncv32u); partSumLastOffs += curPartSumAlignedLength; partSumLastNum = (partSumLastNum + NUM_REMOVE_THREADS - 1) / NUM_REMOVE_THREADS; } while (partSumLastNum>1); partSumNums.push_back(partSumLastNum); partSumOffsets.push_back(partSumLastOffs); NCVVectorAlloc<Ncv32u> d_hierSums(gpuAllocator, partSumLastOffs+1); ncvAssertReturn(gpuAllocator.isCounting() || d_hierSums.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVVectorAlloc<Ncv32u> d_numDstElements(gpuAllocator, 1); ncvAssertReturn(gpuAllocator.isCounting() || d_numDstElements.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN dim3 block(NUM_REMOVE_THREADS); //calculate zero-level partial sums for indices calculation if (partSumNums.size() > 2) { dim3 grid(partSumNums[1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } hipLaunchKernelGGL(( removePass1Scan <true, true>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcLen, d_hierSums.ptr(), d_hierSums.ptr() + partSumOffsets[1], elemRemove); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); //calculate hierarchical partial sums for (Ncv32u i=1; i<partSumNums.size()-1; i++) { dim3 grid_partial(partSumNums[i+1]); if (grid_partial.x > 65535) { grid_partial.y = (grid_partial.x + 65534) / 65535; grid_partial.x = 65535; } if (grid_partial.x != 1) { hipLaunchKernelGGL(( removePass1Scan <false, true>) , dim3(grid_partial), dim3(block), 0, nppStGetActiveCUDAstream(), d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], NULL, d_hierSums.ptr() + partSumOffsets[i+1], 0); } else { hipLaunchKernelGGL(( removePass1Scan <false, false>) , dim3(grid_partial), dim3(block), 0, nppStGetActiveCUDAstream(), d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], NULL, NULL, 0); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } //adjust hierarchical partial sums for (Ncv32s i=(Ncv32s)partSumNums.size()-3; i>=0; i--) { dim3 grid_local(partSumNums[i+1]); if (grid_local.x > 65535) { grid_local.y = (grid_local.x + 65534) / 65535; grid_local.x = 65535; } hipLaunchKernelGGL(( removePass2Adjust) , dim3(grid_local), dim3(block), 0, nppStGetActiveCUDAstream(), d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], d_hierSums.ptr() + partSumOffsets[i+1]); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } } else { dim3 grid_local(partSumNums[1]); hipLaunchKernelGGL(( removePass1Scan <true, false>) , dim3(grid_local), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcLen, d_hierSums.ptr(), NULL, elemRemove); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } //compact source vector using indices dim3 grid(partSumNums[1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } hipLaunchKernelGGL(( removePass3Compact) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcLen, d_hierSums.ptr(), d_dst, elemRemove, d_numDstElements.ptr()); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); //get number of dst elements if (dstLenPinned != NULL) { ncvAssertCUDAReturn(hipMemcpyAsync(dstLenPinned, d_numDstElements.ptr(), sizeof(Ncv32u), hipMemcpyDeviceToHost, nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR); } NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus nppsStCompactGetSize_32u(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { 
ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { *pBufsize = 0; return NPPST_SUCCESS; } NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = compactVector_32u_device(NULL, srcLen, NULL, NULL, 0xC001C0DE, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppsStCompactGetSize_32s(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp); } NCVStatus nppsStCompactGetSize_32f(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp); } NCVStatus nppsStCompact_32u(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_dst, Ncv32u *p_dstLen, Ncv32u elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = compactVector_32u_device(d_src, srcLen, d_dst, p_dstLen, elemRemove, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppsStCompact_32s(Ncv32s *d_src, Ncv32u srcLen, Ncv32s *d_dst, Ncv32u *p_dstLen, Ncv32s elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen, *(Ncv32u *)&elemRemove, pBuffer, bufSize, devProp); } #if defined __GNUC__ && (__GNUC__*100 + __GNUC_MINOR__ > 204) typedef Ncv32u __attribute__((__may_alias__)) Ncv32u_a; #else typedef Ncv32u Ncv32u_a; #endif NCVStatus nppsStCompact_32f(Ncv32f *d_src, Ncv32u srcLen, Ncv32f *d_dst, Ncv32u *p_dstLen, Ncv32f elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen, *(Ncv32u_a *)&elemRemove, pBuffer, bufSize, devProp); } NCVStatus nppsStCompact_32u_host(Ncv32u *h_src, Ncv32u srcLen, Ncv32u *h_dst, Ncv32u *dstLen, Ncv32u elemRemove) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { if (dstLen != NULL) { *dstLen = 0; } return NPPST_SUCCESS; } Ncv32u dstIndex = 0; for (Ncv32u srcIndex=0; srcIndex<srcLen; srcIndex++) { if (h_src[srcIndex] != elemRemove) { h_dst[dstIndex++] = h_src[srcIndex]; } } if (dstLen != NULL) { *dstLen = dstIndex; } return NPPST_SUCCESS; } NCVStatus nppsStCompact_32s_host(Ncv32s *h_src, Ncv32u srcLen, Ncv32s *h_dst, Ncv32u *dstLen, Ncv32s elemRemove) { return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove); } NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen, Ncv32f *h_dst, Ncv32u *dstLen, Ncv32f elemRemove) { return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove); } //============================================================================== // // Filter.cu // //============================================================================== texture <float, 1, hipReadModeElementType> texSrc; texture <float, 1, hipReadModeElementType> texKernel; __forceinline__ __device__ float getValueMirrorRow(const int rowOffset, int i, int w) { if (i < 0) i = 1 - i; if (i >= w) i = w + w - i - 1; return tex1Dfetch (texSrc, rowOffset + i); } __forceinline__ __device__ float getValueMirrorColumn(const int offset, 
const int rowStep, int j, int h) { if (j < 0) j = 1 - j; if (j >= h) j = h + h - j - 1; return tex1Dfetch (texSrc, offset + j * rowStep); } __global__ void FilterRowBorderMirror_32f_C1R(Ncv32u srcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u roi, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { // position within ROI const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix >= roi.width || iy >= roi.height) { return; } const int p = nKernelSize - nAnchor - 1; const int j = roi.y + iy; const int rowOffset = j * srcStep + roi.x; float sum = 0.0f; for (int m = 0; m < nKernelSize; ++m) { sum += getValueMirrorRow (rowOffset, ix + m - p, roi.width) * tex1Dfetch (texKernel, m); } pDst[iy * dstStep + ix] = sum * multiplier; } __global__ void FilterColumnBorderMirror_32f_C1R(Ncv32u srcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u roi, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix >= roi.width || iy >= roi.height) { return; } const int p = nKernelSize - nAnchor - 1; const int i = roi.x + ix; const int offset = i + roi.y * srcStep; float sum = 0.0f; for (int m = 0; m < nKernelSize; ++m) { sum += getValueMirrorColumn (offset, srcStep, iy + m - p, roi.height) * tex1Dfetch (texKernel, m); } pDst[ix + iy * dstStep] = sum * multiplier; } NCVStatus nppiStFilterRowBorder_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u oROI, NppStBorderType borderType, const Ncv32f *pKernel, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR); ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && dstSize.width * sizeof (Ncv32f) <= nDstStep && oROI.width * sizeof (Ncv32f) <= nSrcStep && oROI.width * sizeof (Ncv32f) <= nDstStep && nSrcStep % sizeof (Ncv32f) == 0 && nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // adjust ROI size to be within source image if (oROI.x + oROI.width > srcSize.width) { oROI.width = srcSize.width - oROI.x; } if (oROI.y + oROI.height > srcSize.height) { oROI.height = srcSize.height - oROI.y; } hipChannelFormatDesc floatChannel = hipCreateChannelDesc <float> (); texSrc.normalized = false; texKernel.normalized = false; hipBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep); hipBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f)); dim3 ctaSize (32, 6); dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x, (oROI.height + ctaSize.y - 1) / ctaSize.y); switch (borderType) { case nppStBorderNone: return NPPST_ERROR; case nppStBorderClamp: return NPPST_ERROR; case nppStBorderWrap: return NPPST_ERROR; case nppStBorderMirror: hipLaunchKernelGGL(( FilterRowBorderMirror_32f_C1R) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (), srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); break; default: return NPPST_ERROR; } return NPPST_SUCCESS; } NCVStatus nppiStFilterColumnBorder_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u 
oROI, NppStBorderType borderType, const Ncv32f *pKernel, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR); ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && dstSize.width * sizeof (Ncv32f) <= nDstStep && oROI.width * sizeof (Ncv32f) <= nSrcStep && oROI.width * sizeof (Ncv32f) <= nDstStep && nSrcStep % sizeof (Ncv32f) == 0 && nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // adjust ROI size to be within source image if (oROI.x + oROI.width > srcSize.width) { oROI.width = srcSize.width - oROI.x; } if (oROI.y + oROI.height > srcSize.height) { oROI.height = srcSize.height - oROI.y; } hipChannelFormatDesc floatChannel = hipCreateChannelDesc <float> (); texSrc.normalized = false; texKernel.normalized = false; hipBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep); hipBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f)); dim3 ctaSize (32, 6); dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x, (oROI.height + ctaSize.y - 1) / ctaSize.y); switch (borderType) { case nppStBorderClamp: return NPPST_ERROR; case nppStBorderWrap: return NPPST_ERROR; case nppStBorderMirror: hipLaunchKernelGGL(( FilterColumnBorderMirror_32f_C1R) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (), srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); break; default: return NPPST_ERROR; } return NPPST_SUCCESS; } //============================================================================== // // FrameInterpolate.cu // //============================================================================== inline Ncv32u iDivUp(Ncv32u num, Ncv32u denom) { return (num + denom - 1)/denom; } texture<float, 2, hipReadModeElementType> tex_src1; texture<float, 2, hipReadModeElementType> tex_src0; __global__ void BlendFramesKernel(const float *u, const float *v, // forward flow const float *ur, const float *vr, // backward flow const float *o0, const float *o1, // coverage masks int w, int h, int s, float theta, float *out) { const int ix = threadIdx.x + blockDim.x * blockIdx.x; const int iy = threadIdx.y + blockDim.y * blockIdx.y; const int pos = ix + s * iy; if (ix >= w || iy >= h) return; float _u = u[pos]; float _v = v[pos]; float _ur = ur[pos]; float _vr = vr[pos]; float x = (float)ix + 0.5f; float y = (float)iy + 0.5f; bool b0 = o0[pos] > 1e-4f; bool b1 = o1[pos] > 1e-4f; if (b0 && b1) { // pixel is visible on both frames out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta) * (1.0f - theta) + tex2D(tex_src1, x + _u * (1.0f - theta), y + _v * (1.0f - theta)) * theta; } else if (b0) { // visible on the first frame only out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta); } else { // visible on the second frame only out[pos] = tex2D(tex_src1, x - _ur * (1.0f - theta), y - _vr * (1.0f - theta)); } } NCVStatus BlendFrames(const Ncv32f *src0, const Ncv32f *src1, const Ncv32f *ufi, const Ncv32f *vfi, const Ncv32f *ubi, const Ncv32f *vbi, const Ncv32f *o1, const Ncv32f *o2, Ncv32u width, Ncv32u height, Ncv32u stride, Ncv32f theta, Ncv32f *out) { tex_src1.addressMode[0] = hipAddressModeClamp; tex_src1.addressMode[1] = hipAddressModeClamp; tex_src1.filterMode = hipFilterModeLinear; tex_src1.normalized = false; 
tex_src0.addressMode[0] = hipAddressModeClamp; tex_src0.addressMode[1] = hipAddressModeClamp; tex_src0.filterMode = hipFilterModeLinear; tex_src0.normalized = false; hipChannelFormatDesc desc = hipCreateChannelDesc <float> (); const Ncv32u pitch = stride * sizeof (float); ncvAssertCUDAReturn (hipBindTexture2D (0, tex_src1, src1, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn (hipBindTexture2D (0, tex_src0, src0, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR); dim3 threads (32, 4); dim3 blocks (iDivUp (width, threads.x), iDivUp (height, threads.y)); hipLaunchKernelGGL(( BlendFramesKernel), dim3(blocks), dim3(threads), 0, nppStGetActiveCUDAstream (), ufi, vfi, ubi, vbi, o1, o2, width, height, stride, theta, out); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStGetInterpolationBufferSize(NcvSize32u srcSize, Ncv32u nStep, Ncv32u *hpSize) { NCVStatus status = NPPST_ERROR; status = nppiStVectorWarpGetBufferSize(srcSize, nStep, hpSize); return status; } NCVStatus nppiStInterpolateFrames(const NppStInterpolationState *pState) { // check state validity ncvAssertReturn (pState->pSrcFrame0 != 0 && pState->pSrcFrame1 != 0 && pState->pFU != 0 && pState->pFV != 0 && pState->pBU != 0 && pState->pBV != 0 && pState->pNewFrame != 0 && pState->ppBuffers[0] != 0 && pState->ppBuffers[1] != 0 && pState->ppBuffers[2] != 0 && pState->ppBuffers[3] != 0 && pState->ppBuffers[4] != 0 && pState->ppBuffers[5] != 0, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (pState->size.width > 0 && pState->size.height > 0, NPPST_ERROR); ncvAssertReturn (pState->nStep >= pState->size.width * sizeof (Ncv32f) && pState->nStep > 0 && pState->nStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); // change notation Ncv32f *cov0 = pState->ppBuffers[0]; Ncv32f *cov1 = pState->ppBuffers[1]; Ncv32f *fwdU = pState->ppBuffers[2]; // forward u Ncv32f *fwdV = pState->ppBuffers[3]; // forward v Ncv32f *bwdU = pState->ppBuffers[4]; // backward u Ncv32f *bwdV = pState->ppBuffers[5]; // backward v // warp flow ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFU, pState->size, pState->nStep, pState->pFU, pState->pFV, pState->nStep, cov0, pState->pos, fwdU) ); ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFV, pState->size, pState->nStep, pState->pFU, pState->pFV, pState->nStep, cov0, pState->pos, fwdV) ); // warp backward flow ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBU, pState->size, pState->nStep, pState->pBU, pState->pBV, pState->nStep, cov1, 1.0f - pState->pos, bwdU) ); ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBV, pState->size, pState->nStep, pState->pBU, pState->pBV, pState->nStep, cov1, 1.0f - pState->pos, bwdU) ); // interpolate frame ncvAssertReturnNcvStat ( BlendFrames (pState->pSrcFrame0, pState->pSrcFrame1, fwdU, fwdV, bwdU, bwdV, cov0, cov1, pState->size.width, pState->size.height, pState->nStep / sizeof (Ncv32f), pState->pos, pState->pNewFrame) ); return NPPST_SUCCESS; } //============================================================================== // // VectorWarpFrame.cu // //============================================================================== #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) // FP32 atomic add static __forceinline__ __device__ float _atomicAdd(float *addr, float val) { float old = *addr, assumed; do { assumed = old; old = int_as_float(__iAtomicCAS((int*)addr, float_as_int(assumed), float_as_int(val+assumed))); } while( assumed!=old 
); return old; } #else #define _atomicAdd atomicAdd #endif __global__ void ForwardWarpKernel_PSF2x2(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *normalization_factor, float *dst) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; if (i >= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; //bottom left corner of a target pixel float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f; float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f; // pixel containing bottom left corner float px; float py; float dx = modff (cx, &px); float dy = modff (cy, &py); // target pixel integer coords int tx; int ty; tx = (int) px; ty = (int) py; float value = src[image_row_offset + j]; float weight; // fill pixel containing bottom right corner if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * dy; _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing bottom left corner tx -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * dy; _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper left corner ty -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper right corner tx += 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } } __global__ void ForwardWarpKernel_PSF1x1(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *dst) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; if (i >= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; float u_ = u[flow_row_offset + j]; float v_ = v[flow_row_offset + j]; //bottom left corner of target pixel float cx = u_ * time_scale + (float)j + 1.0f; float cy = v_ * time_scale + (float)i + 1.0f; // pixel containing bottom left corner int tx = __float2int_rn (cx); int ty = __float2int_rn (cy); float value = src[image_row_offset + j]; // fill pixel if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { _atomicAdd (dst + ty * image_stride + tx, value); } } __global__ void NormalizeKernel(const float *normalization_factor, int w, int h, int s, float *image) { int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if (i >= h || j >= w) return; const int pos = i * s + j; float scale = normalization_factor[pos]; float invScale = (scale == 0.0f) ? 
1.0f : (1.0f / scale); image[pos] *= invScale; } __global__ void MemsetKernel(const float value, int w, int h, float *image) { int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if (i >= h || j >= w) return; const int pos = i * w + j; image[pos] = value; } NCVStatus nppiStVectorWarpGetBufferSize (NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32u *hpSize) { ncvAssertReturn (hpSize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep, NPPST_INVALID_STEP); *hpSize = nSrcStep * srcSize.height; return NPPST_SUCCESS; } // does not require normalization NCVStatus nppiStVectorWarp_PSF1x1_32f_C1(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, const Ncv32f *pU, const Ncv32f *pV, Ncv32u nVFStep, Ncv32f timeScale, Ncv32f *pDst) { ncvAssertReturn (pSrc != NULL && pU != NULL && pV != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u vfStep = nVFStep / sizeof (Ncv32f); dim3 ctaSize (32, 6); dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y)); hipLaunchKernelGGL(( ForwardWarpKernel_PSF1x1) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(), pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStVectorWarp_PSF2x2_32f_C1(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, const Ncv32f *pU, const Ncv32f *pV, Ncv32u nVFStep, Ncv32f *pBuffer, Ncv32f timeScale, Ncv32f *pDst) { ncvAssertReturn (pSrc != NULL && pU != NULL && pV != NULL && pDst != NULL && pBuffer != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u vfStep = nVFStep / sizeof(Ncv32f); dim3 ctaSize(32, 6); dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y)); hipLaunchKernelGGL(( MemsetKernel) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(), 0, srcSize.width, srcSize.height, pBuffer); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); hipLaunchKernelGGL(( ForwardWarpKernel_PSF2x2) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(), pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pBuffer, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); hipLaunchKernelGGL(( NormalizeKernel) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(), pBuffer, srcSize.width, srcSize.height, srcStep, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } //============================================================================== // // Resize.cu // //============================================================================== texture <float, 2, hipReadModeElementType> texSrc2D; __forceinline__ __device__ float processLine(int spos, float xmin, float xmax, int ixmin, int ixmax, float fxmin, float cxmax) { // first element float wsum = 1.0f - xmin + fxmin; float sum = tex1Dfetch(texSrc, spos) * (1.0f - xmin + fxmin); spos++; for (int ix = ixmin + 1; ix < ixmax; ++ix) { sum += tex1Dfetch(texSrc, spos); spos++; wsum += 1.0f; } sum += tex1Dfetch(texSrc, spos) * (cxmax - xmax); wsum += cxmax - xmax; 
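// processLine returns a coverage-weighted average over the sampled span: the accumulated sum is normalized by the total weight wsum before being returned.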
return sum / wsum; } __global__ void resizeSuperSample_32f(NcvSize32u srcSize, Ncv32u srcStep, NcvRect32u srcROI, Ncv32f *dst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { // position within dst ROI const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } float rw = (float) srcROI.width; float rh = (float) srcROI.height; // source position float x = scaleX * (float) ix; float y = scaleY * (float) iy; // x sampling range float xBegin = fmax (x - scaleX, 0.0f); float xEnd = fmin (x + scaleX, rw - 1.0f); // y sampling range float yBegin = fmax (y - scaleY, 0.0f); float yEnd = fmin (y + scaleY, rh - 1.0f); // x range of source samples float floorXBegin = floorf (xBegin); float ceilXEnd = ceilf (xEnd); int iXBegin = srcROI.x + (int) floorXBegin; int iXEnd = srcROI.x + (int) ceilXEnd; // y range of source samples float floorYBegin = floorf (yBegin); float ceilYEnd = ceilf (yEnd); int iYBegin = srcROI.y + (int) floorYBegin; int iYEnd = srcROI.y + (int) ceilYEnd; // first row int pos = iYBegin * srcStep + iXBegin; float wsum = 1.0f - yBegin + floorYBegin; float sum = processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (1.0f - yBegin + floorYBegin); pos += srcStep; for (int iy = iYBegin + 1; iy < iYEnd; ++iy) { sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd); pos += srcStep; wsum += 1.0f; } sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (ceilYEnd - yEnd); wsum += ceilYEnd - yEnd; sum /= wsum; dst[(ix + dstROI.x) + (iy + dstROI.y) * dstStep] = sum; } // bicubic interpolation __forceinline__ __device__ float bicubicCoeff(float x_) { float x = fabsf(x_); if (x <= 1.0f) { return x * x * (1.5f * x - 2.5f) + 1.0f; } else if (x < 2.0f) { return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; } else { return 0.0f; } } __global__ void resizeBicubic(NcvSize32u srcSize, NcvRect32u srcROI, NcvSize32u dstSize, Ncv32u dstStep, Ncv32f *dst, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } const float dx = 1.0f / srcROI.width; const float dy = 1.0f / srcROI.height; float rx = (float) srcROI.x; float ry = (float) srcROI.y; float rw = (float) srcROI.width; float rh = (float) srcROI.height; float x = scaleX * (float) ix; float y = scaleY * (float) iy; // sampling range // border mode is clamp float xmin = fmax (ceilf (x - 2.0f), 0.0f); float xmax = fmin (floorf (x + 2.0f), rw - 1.0f); float ymin = fmax (ceilf (y - 2.0f), 0.0f); float ymax = fmin (floorf (y + 2.0f), rh - 1.0f); // shift data window to match ROI rx += 0.5f; ry += 0.5f; x += rx; y += ry; xmin += rx; xmax += rx; ymin += ry; ymax += ry; float sum = 0.0f; float wsum = 0.0f; for (float cy = ymin; cy <= ymax; cy += 1.0f) { for (float cx = xmin; cx <= xmax; cx += 1.0f) { float xDist = x - cx; float yDist = y - cy; float wx = bicubicCoeff (xDist); float wy = bicubicCoeff (yDist); wx *= wy; sum += wx * tex2D (texSrc2D, cx * dx, cy * dy); wsum += wx; } } dst[(ix + dstROI.x)+ (iy + dstROI.y) * dstStep] = (!wsum)? 
0 : sum / wsum; } NCVStatus nppiStResize_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, NcvRect32u srcROI, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u dstROI, Ncv32f xFactor, Ncv32f yFactor, NppStInterpMode interpolation) { NCVStatus status = NPPST_SUCCESS; ncvAssertReturn (pSrc != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (xFactor != 0.0 && yFactor != 0.0, NPPST_INVALID_SCALE); ncvAssertReturn (nSrcStep >= sizeof (Ncv32f) * (Ncv32u) srcSize.width && nDstStep >= sizeof (Ncv32f) * (Ncv32f) dstSize.width, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // TODO: preprocess ROI to prevent out of bounds access if (interpolation == nppStSupersample) { // bind texture hipBindTexture (0, texSrc, pSrc, srcSize.height * nSrcStep); // invoke kernel dim3 ctaSize (32, 6); dim3 gridSize ((dstROI.width + ctaSize.x - 1) / ctaSize.x, (dstROI.height + ctaSize.y - 1) / ctaSize.y); hipLaunchKernelGGL(( resizeSuperSample_32f) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (), srcSize, srcStep, srcROI, pDst, dstSize, dstStep, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else if (interpolation == nppStBicubic) { texSrc2D.addressMode[0] = hipAddressModeMirror; texSrc2D.addressMode[1] = hipAddressModeMirror; texSrc2D.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc <float> (); hipBindTexture2D (0, texSrc2D, pSrc, desc, srcSize.width, srcSize.height, nSrcStep); dim3 ctaSize (32, 6); dim3 gridSize ((dstSize.width + ctaSize.x - 1) / ctaSize.x, (dstSize.height + ctaSize.y - 1) / ctaSize.y); hipLaunchKernelGGL(( resizeBicubic) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (), srcSize, srcROI, dstSize, dstStep, pDst, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else { status = NPPST_ERROR; } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return status; } #endif /* CUDA_DISABLER */
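// Illustrative usage sketch (not part of the original sources): downscaling a single-channel
// 32-bit float image by 2x through the supersampling path of nppiStResize_32f_C1R above.
// The device pointers d_src/d_dst, the chosen sizes and the omitted allocation/error handling
// are hypothetical and reduced to the bare minimum.
//
//   NcvSize32u srcSize(640, 480);
//   NcvSize32u dstSize(320, 240);
//   NcvRect32u srcROI(0, 0, srcSize.width, srcSize.height);
//   NcvRect32u dstROI(0, 0, dstSize.width, dstSize.height);
//   Ncv32u nSrcStep = srcSize.width * sizeof(Ncv32f);   // tightly packed rows
//   Ncv32u nDstStep = dstSize.width * sizeof(Ncv32f);
//   NCVStatus st = nppiStResize_32f_C1R(d_src, srcSize, nSrcStep, srcROI,
//                                       d_dst, dstSize, nDstStep, dstROI,
//                                       0.5f, 0.5f, nppStSupersample);
//   // st == NPPST_SUCCESS on success; the kernel is launched on nppStGetActiveCUDAstream().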
fa7fb3bbf4375f8d8f5cf39ce5c017aa864cbb10.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include <vector> #include <cuda_runtime.h> #include "opencv2/cudev.hpp" #include "opencv2/cudalegacy/NPP_staging.hpp" texture<Ncv8u, 1, cudaReadModeElementType> tex8u; texture<Ncv32u, 1, cudaReadModeElementType> tex32u; texture<uint2, 1, cudaReadModeElementType> tex64u; //============================================================================== // // CUDA streams handling // //============================================================================== static cudaStream_t nppStream = 0; cudaStream_t nppStGetActiveCUDAstream(void) { return nppStream; } cudaStream_t nppStSetActiveCUDAstream(cudaStream_t cudaStream) { cudaStream_t tmp = nppStream; nppStream = cudaStream; return tmp; } //============================================================================== // // IntegralImage.cu // //============================================================================== const Ncv32u NUM_SCAN_THREADS = 256; const Ncv32u LOG2_NUM_SCAN_THREADS = 8; template<class T_in, class T_out> struct _scanElemOp { template<bool tbDoSqr> static inline __host__ __device__ T_out scanElemOp(T_in elem) { return scanElemOp( elem, Int2Type<(int)tbDoSqr>() ); } private: template <int v> struct Int2Type { enum { value = v }; }; static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<0>) { return (T_out)elem; } static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<1>) { return (T_out)(elem*elem); } }; template<class T> inline __device__ T readElem(T *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs); template<> inline __device__ Ncv8u readElem<Ncv8u>(Ncv8u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return tex1Dfetch(tex8u, texOffs + srcStride * blockIdx.x + curElemOffs); } template<> inline __device__ Ncv32u readElem<Ncv32u>(Ncv32u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return d_src[curElemOffs]; } template<> inline __device__ Ncv32f readElem<Ncv32f>(Ncv32f *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return d_src[curElemOffs]; } /** * \brief Segmented scan kernel * * Calculates per-row prefix scans of the input image. 
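* For example (with tbDoSqr = false), an input row [3, 1, 4] is written out as [0, 3, 4, 8]: an exclusive prefix scan followed by the row total in the extra trailing element.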
* Out-of-bounds safe: reads 'size' elements, writes 'size+1' elements * * \tparam T_in Type of input image elements * \tparam T_out Type of output image elements * \tparam T_op Defines an operation to be performed on the input image pixels * * \param d_src [IN] Source image pointer * \param srcWidth [IN] Source image width * \param srcStride [IN] Source image stride * \param d_II [OUT] Output image pointer * \param IIstride [IN] Output image stride * * \return None */ template <class T_in, class T_out, bool tbDoSqr> __global__ void scanRows(T_in *d_src, Ncv32u texOffs, Ncv32u srcWidth, Ncv32u srcStride, T_out *d_II, Ncv32u IIstride) { //advance pointers to the current line if (sizeof(T_in) != 1) { d_src += srcStride * blockIdx.x; } //for initial image 8bit source we use texref tex8u d_II += IIstride * blockIdx.x; Ncv32u numBuckets = (srcWidth + NUM_SCAN_THREADS - 1) >> LOG2_NUM_SCAN_THREADS; Ncv32u offsetX = 0; __shared__ T_out shmem[NUM_SCAN_THREADS]; __shared__ T_out carryElem; carryElem = 0; __syncthreads(); while (numBuckets--) { Ncv32u curElemOffs = offsetX + threadIdx.x; T_out curScanElem; T_in curElem; T_out curElemMod; if (curElemOffs < srcWidth) { //load elements curElem = readElem<T_in>(d_src, texOffs, srcStride, curElemOffs); } curElemMod = _scanElemOp<T_in, T_out>::scanElemOp<tbDoSqr>(curElem); //inclusive scan curScanElem = cv::cudev::blockScanInclusive<NUM_SCAN_THREADS>(curElemMod, shmem, threadIdx.x); if (curElemOffs <= srcWidth) { //make scan exclusive and write the bucket to the output buffer d_II[curElemOffs] = carryElem + curScanElem - curElemMod; offsetX += NUM_SCAN_THREADS; } //remember last element for subsequent buckets adjustment __syncthreads(); if (threadIdx.x == NUM_SCAN_THREADS-1) { carryElem += curScanElem; } __syncthreads(); } if (offsetX == srcWidth && !threadIdx.x) { d_II[offsetX] = carryElem; } } template <bool tbDoSqr, class T_in, class T_out> NCVStatus scanRowsWrapperDevice(T_in *d_src, Ncv32u srcStride, T_out *d_dst, Ncv32u dstStride, NcvSize32u roi) { cudaChannelFormatDesc cfdTex; size_t alignmentOffset = 0; if (sizeof(T_in) == 1) { cfdTex = cudaCreateChannelDesc<Ncv8u>(); ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); if (alignmentOffset > 0) { ncvAssertCUDAReturn(cudaUnbindTexture(tex8u), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, alignmentOffset + roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); } } scanRows <T_in, T_out, tbDoSqr> <<<roi.height, NUM_SCAN_THREADS, 0, nppStGetActiveCUDAstream()>>> (d_src, (Ncv32u)alignmentOffset, roi.width, srcStride, d_dst, dstStride); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } static Ncv32u getPaddedDimension(Ncv32u dim, Ncv32u elemTypeSize, Ncv32u allocatorAlignment) { Ncv32u alignMask = allocatorAlignment-1; Ncv32u inverseAlignMask = ~alignMask; Ncv32u dimBytes = dim * elemTypeSize; Ncv32u pitch = (dimBytes + alignMask) & inverseAlignMask; Ncv32u PaddedDim = pitch / elemTypeSize; return PaddedDim; } template <class T_in, class T_out> NCVStatus ncvIntegralImage_device(T_in *d_src, Ncv32u srcStep, T_out *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(sizeof(T_out) == sizeof(Ncv32u), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized(), 
NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width * sizeof(T_in) && dstStep >= (roi.width + 1) * sizeof(T_out) && srcStep % sizeof(T_in) == 0 && dstStep % sizeof(T_out) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T_in); dstStep /= sizeof(T_out); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); NCVMatrixAlloc<T_out> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<T_out> Tmp32_2(gpuAllocator, PaddedHeightII32, PaddedWidthII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_2.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(Tmp32_1.pitch() * Tmp32_1.height() == Tmp32_2.pitch() * Tmp32_2.height(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <false> (d_src, srcStep, Tmp32_1.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedWidthII32*sizeof(Ncv32u), (Ncv32u *)Tmp32_2.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false> (Tmp32_2.ptr(), PaddedHeightII32, Tmp32_1.ptr(), PaddedHeightII32, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), (Ncv32u *)d_dst, dstStep*sizeof(Ncv32u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus ncvSquaredIntegralImage_device(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width && dstStep >= (roi.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedWidthII64 = getPaddedDimension(WidthII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedHeightII64 = getPaddedDimension(HeightII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedWidthMax = PaddedWidthII32 > PaddedWidthII64 ? PaddedWidthII32 : PaddedWidthII64; Ncv32u PaddedHeightMax = PaddedHeightII32 > PaddedHeightII64 ? 
PaddedHeightII32 : PaddedHeightII64; NCVMatrixAlloc<Ncv32u> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<Ncv64u> Tmp64(gpuAllocator, PaddedWidthMax, PaddedHeightMax); ncvAssertReturn(Tmp64.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv32u> Tmp32_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv64u> Tmp64_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII64, PaddedHeightII64); ncvAssertReturn(Tmp64_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <true, Ncv8u, Ncv32u> (d_src, srcStep, Tmp32_2.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R(Tmp32_2.ptr(), PaddedWidthII32*sizeof(Ncv32u), Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false, Ncv32u, Ncv64u> (Tmp32_1.ptr(), PaddedHeightII32, Tmp64_2.ptr(), PaddedHeightII64, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_64u_C1R(Tmp64_2.ptr(), PaddedHeightII64*sizeof(Ncv64u), d_dst, dstStep*sizeof(Ncv64u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_8u32u(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv8u*)NULL, roiSize.width, (Ncv32u*)NULL, (roiSize.width+1) * sizeof(Ncv32u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_32f32f(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv32f*)NULL, roiSize.width * sizeof(Ncv32f), (Ncv32f*)NULL, (roiSize.width+1) * sizeof(Ncv32f), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegralGetSize_8u64u(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(NULL, roiSize.width, NULL, (roiSize.width+1) * sizeof(Ncv64u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv32u *d_dst, Ncv32u dstStep, NcvSize32u 
roiSize, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_32f32f_C1R(Ncv32f *d_src, Ncv32u srcStep, Ncv32f *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv32u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv32u) && dstStep % sizeof(Ncv32u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv32u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv32u top = h_dst[(i-1) * dstStep + j]; Ncv32u left = h_dst[i * dstStep + (j - 1)]; Ncv32u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus nppiStIntegral_32f32f_C1R_host(Ncv32f *h_src, Ncv32u srcStep, Ncv32f *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width * sizeof(Ncv32f) && dstStep >= (roiSize.width + 1) * sizeof(Ncv32f) && srcStep % sizeof(Ncv32f) == 0 && dstStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(Ncv32f); dstStep /= sizeof(Ncv32f); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0.0f; for (Ncv32u j=1; j<WidthII; j++) { Ncv32f top = h_dst[(i-1) * dstStep + j]; Ncv32f left = h_dst[i * dstStep + (j - 1)]; Ncv32f topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32f elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv64u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { 
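// Host reference implementation: fills the (width+1) x (height+1) squared integral image with a zero first row and column, using the summed-area recurrence II(i,j) = src(i-1,j-1)^2 + II(i,j-1) + II(i-1,j) - II(i-1,j-1).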
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv64u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv64u top = h_dst[(i-1) * dstStep + j]; Ncv64u left = h_dst[i * dstStep + (j - 1)]; Ncv64u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv64u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem*elem + left - topleft + top; } } return NPPST_SUCCESS; } //============================================================================== // // Decimate.cu // //============================================================================== const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_X = 32; const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_Y = 8; template<class T, NcvBool tbCacheTexture> __device__ T getElem_Decimate(Ncv32u x, T *d_src); template<> __device__ Ncv32u getElem_Decimate<Ncv32u, true>(Ncv32u x, Ncv32u *d_src) { return tex1Dfetch(tex32u, x); } template<> __device__ Ncv32u getElem_Decimate<Ncv32u, false>(Ncv32u x, Ncv32u *d_src) { return d_src[x]; } template<> __device__ Ncv64u getElem_Decimate<Ncv64u, true>(Ncv32u x, Ncv64u *d_src) { uint2 tmp = tex1Dfetch(tex64u, x); Ncv64u res = (Ncv64u)tmp.y; res <<= 32; res |= tmp.x; return res; } template<> __device__ Ncv64u getElem_Decimate<Ncv64u, false>(Ncv32u x, Ncv64u *d_src) { return d_src[x]; } template <class T, NcvBool tbCacheTexture> __global__ void decimate_C1R(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep, NcvSize32u dstRoi, Ncv32u scale) { int curX = blockIdx.x * blockDim.x + threadIdx.x; int curY = blockIdx.y * blockDim.y + threadIdx.y; if (curX >= dstRoi.width || curY >= dstRoi.height) { return; } d_dst[curY * dstStep + curX] = getElem_Decimate<T, tbCacheTexture>((curY * srcStep + curX) * scale, d_src); } template <class T> static NCVStatus decimateWrapperDevice(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep, NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) { ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE); ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) && dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale), NPPST_INVALID_STEP); srcStep /= sizeof(T); dstStep /= sizeof(T); NcvSize32u dstRoi; dstRoi.width = srcRoi.width / scale; dstRoi.height = srcRoi.height / scale; dim3 grid((dstRoi.width + NUM_DOWNSAMPLE_NEAREST_THREADS_X - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_X, (dstRoi.height + NUM_DOWNSAMPLE_NEAREST_THREADS_Y - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_Y); dim3 block(NUM_DOWNSAMPLE_NEAREST_THREADS_X, NUM_DOWNSAMPLE_NEAREST_THREADS_Y); if (!readThruTexture) { decimate_C1R <T, false> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcStep, d_dst, dstStep, dstRoi, scale); } else { cudaChannelFormatDesc cfdTexSrc; if (sizeof(T) == sizeof(Ncv32u)) { cfdTexSrc = cudaCreateChannelDesc<Ncv32u>(); size_t alignmentOffset; ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex32u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, 
NPPST_TEXTURE_BIND_ERROR); } else { cfdTexSrc = cudaCreateChannelDesc<uint2>(); size_t alignmentOffset; ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex64u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); } decimate_C1R <T, true> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcStep, d_dst, dstStep, dstRoi, scale); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } template <class T> static NCVStatus decimateWrapperHost(T *h_src, Ncv32u srcStep, T *h_dst, Ncv32u dstStep, NcvSize32u srcRoi, Ncv32u scale) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width != 0 && srcRoi.height != 0, NPPST_INVALID_ROI); ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE); ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) && dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale) && srcStep % sizeof(T) == 0 && dstStep % sizeof(T) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T); dstStep /= sizeof(T); NcvSize32u dstRoi; dstRoi.width = srcRoi.width / scale; dstRoi.height = srcRoi.height / scale; for (Ncv32u i=0; i<dstRoi.height; i++) { for (Ncv32u j=0; j<dstRoi.width; j++) { h_dst[i*dstStep+j] = h_src[i*scale*srcStep + j*scale]; } } return NPPST_SUCCESS; } #define implementNppDecimate(bit, typ) \ NCVStatus nppiStDecimate_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \ Ncv##bit##typ *d_dst, Ncv32u dstStep, \ NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) \ { \ return decimateWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \ (Ncv##bit##u *)d_dst, dstStep, \ srcRoi, scale, readThruTexture); \ } #define implementNppDecimateHost(bit, typ) \ NCVStatus nppiStDecimate_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \ Ncv##bit##typ *h_dst, Ncv32u dstStep, \ NcvSize32u srcRoi, Ncv32u scale) \ { \ return decimateWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \ (Ncv##bit##u *)h_dst, dstStep, \ srcRoi, scale); \ } implementNppDecimate(32, u) implementNppDecimate(32, s) implementNppDecimate(32, f) implementNppDecimate(64, u) implementNppDecimate(64, s) implementNppDecimate(64, f) implementNppDecimateHost(32, u) implementNppDecimateHost(32, s) implementNppDecimateHost(32, f) implementNppDecimateHost(64, u) implementNppDecimateHost(64, s) implementNppDecimateHost(64, f) //============================================================================== // // RectStdDev.cu // //============================================================================== const Ncv32u NUM_RECTSTDDEV_THREADS = 128; template <NcvBool tbCacheTexture> __device__ Ncv32u getElemSum(Ncv32u x, Ncv32u *d_sum) { if (tbCacheTexture) { return tex1Dfetch(tex32u, x); } else { return d_sum[x]; } } template <NcvBool tbCacheTexture> __device__ Ncv64u getElemSqSum(Ncv32u x, Ncv64u *d_sqsum) { if (tbCacheTexture) { uint2 tmp = tex1Dfetch(tex64u, x); Ncv64u res = (Ncv64u)tmp.y; res <<= 32; res |= tmp.x; return res; } else { return d_sqsum[x]; } } template <NcvBool tbCacheTexture> __global__ void rectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep, Ncv64u *d_sqsum, Ncv32u sqsumStep, Ncv32f *d_norm, Ncv32u normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f invRectArea) { Ncv32u x_offs = blockIdx.x * NUM_RECTSTDDEV_THREADS + threadIdx.x; if (x_offs >= roi.width) { return; } Ncv32u sum_offset = blockIdx.y * sumStep + x_offs; Ncv32u sqsum_offset = blockIdx.y * sqsumStep + x_offs; //OPT: try swapping order (could 
change cache hit/miss ratio) Ncv32u sum_tl = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x, d_sum); Ncv32u sum_bl = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x, d_sum); Ncv32u sum_tr = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x + rect.width, d_sum); Ncv32u sum_br = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width, d_sum); Ncv32u sum_val = sum_br + sum_tl - sum_tr - sum_bl; Ncv64u sqsum_tl, sqsum_bl, sqsum_tr, sqsum_br; sqsum_tl = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x, d_sqsum); sqsum_bl = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x, d_sqsum); sqsum_tr = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x + rect.width, d_sqsum); sqsum_br = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width, d_sqsum); Ncv64u sqsum_val = sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl; Ncv32f mean = sum_val * invRectArea; ////////////////////////////////////////////////////////////////////////// // sqsum_val_res = sqsum_val / rectArea ////////////////////////////////////////////////////////////////////////// Ncv32f sqsum_val_1 = __ull2float_rz(sqsum_val); Ncv64u sqsum_val_2 = __float2ull_rz(sqsum_val_1); Ncv64u sqsum_val_3 = sqsum_val - sqsum_val_2; Ncv32f sqsum_val_4 = __ull2float_rn(sqsum_val_3); sqsum_val_1 *= invRectArea; sqsum_val_4 *= invRectArea; Ncv32f sqsum_val_res = sqsum_val_1 + sqsum_val_4; ////////////////////////////////////////////////////////////////////////// // variance = sqsum_val_res - mean * mean ////////////////////////////////////////////////////////////////////////// #if defined DISABLE_MAD_SELECTIVELY Ncv32f variance = sqsum_val_2 - __fmul_rn(mean, mean); #else Ncv32f variance = sqsum_val_res - mean * mean; #endif ////////////////////////////////////////////////////////////////////////// // stddev = sqrtf(variance) ////////////////////////////////////////////////////////////////////////// //Ncv32f stddev = sqrtf(variance); Ncv32f stddev = __fsqrt_rn(variance); d_norm[blockIdx.y * normStep + x_offs] = stddev; } NCVStatus nppiStRectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep, Ncv64u *d_sqsum, Ncv32u sqsumStep, Ncv32f *d_norm, Ncv32u normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f scaleArea, NcvBool readThruTexture) { ncvAssertReturn(d_sum != NULL && d_sqsum != NULL && d_norm != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) && sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) && normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) && sumStep % sizeof(Ncv32u) == 0 && sqsumStep % sizeof(Ncv64u) == 0 && normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE); sumStep /= sizeof(Ncv32u); sqsumStep /= sizeof(Ncv64u); normStep /= sizeof(Ncv32f); Ncv32f rectArea = rect.width * rect.height * scaleArea; Ncv32f invRectArea = 1.0f / rectArea; dim3 grid(((roi.width + NUM_RECTSTDDEV_THREADS - 1) / NUM_RECTSTDDEV_THREADS), roi.height); dim3 block(NUM_RECTSTDDEV_THREADS); if (!readThruTexture) { rectStdDev_32f_C1R <false> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_sum, sumStep, d_sqsum, sqsumStep, d_norm, normStep, roi, rect, invRectArea); } else { cudaChannelFormatDesc cfdTexSrc; cudaChannelFormatDesc cfdTexSqr; 
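// The 64-bit sqsum values are bound as uint2 texels (tex64u) and reassembled from their two 32-bit halves in getElemSqSum, since CUDA textures have no native 64-bit integer texel format.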
cfdTexSrc = cudaCreateChannelDesc<Ncv32u>(); cfdTexSqr = cudaCreateChannelDesc<uint2>(); size_t alignmentOffset; ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex32u, d_sum, cfdTexSrc, (roi.height + rect.y + rect.height) * sumStep * sizeof(Ncv32u)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex64u, d_sqsum, cfdTexSqr, (roi.height + rect.y + rect.height) * sqsumStep * sizeof(Ncv64u)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); rectStdDev_32f_C1R <true> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (NULL, sumStep, NULL, sqsumStep, d_norm, normStep, roi, rect, invRectArea); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStRectStdDev_32f_C1R_host(Ncv32u *h_sum, Ncv32u sumStep, Ncv64u *h_sqsum, Ncv32u sqsumStep, Ncv32f *h_norm, Ncv32u normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f scaleArea) { ncvAssertReturn(h_sum != NULL && h_sqsum != NULL && h_norm != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) && sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) && normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) && sumStep % sizeof(Ncv32u) == 0 && sqsumStep % sizeof(Ncv64u) == 0 && normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE); sumStep /= sizeof(Ncv32u); sqsumStep /= sizeof(Ncv64u); normStep /= sizeof(Ncv32f); Ncv32f rectArea = rect.width * rect.height * scaleArea; Ncv32f invRectArea = 1.0f / rectArea; for (Ncv32u i=0; i<roi.height; i++) { for (Ncv32u j=0; j<roi.width; j++) { Ncv32u sum_offset = i * sumStep + j; Ncv32u sqsum_offset = i * sqsumStep + j; Ncv32u sum_tl = h_sum[sum_offset + rect.y * sumStep + rect.x]; Ncv32u sum_bl = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x]; Ncv32u sum_tr = h_sum[sum_offset + rect.y * sumStep + rect.x + rect.width]; Ncv32u sum_br = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width]; Ncv64f sum_val = sum_br + sum_tl - sum_tr - sum_bl; Ncv64u sqsum_tl = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x]; Ncv64u sqsum_bl = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x]; Ncv64u sqsum_tr = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x + rect.width]; Ncv64u sqsum_br = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width]; Ncv64f sqsum_val = (Ncv64f)(sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl); Ncv64f mean = sum_val * invRectArea; Ncv64f sqsum_val_2 = sqsum_val / rectArea; Ncv64f variance = sqsum_val_2 - mean * mean; h_norm[i * normStep + j] = (Ncv32f)sqrt(variance); } } return NPPST_SUCCESS; } //============================================================================== // // Transpose.cu // //============================================================================== const Ncv32u TRANSPOSE_TILE_DIM = 16; const Ncv32u TRANSPOSE_BLOCK_ROWS = 16; /** * \brief Matrix transpose kernel * * Calculates transpose of the input image * \see TRANSPOSE_TILE_DIM * * \tparam T_in Type of input image elements * \tparam T_out Type of output image elements * * \param d_src [IN] Source image pointer * \param srcStride [IN] Source image stride * \param d_dst [OUT] Output image pointer * \param dstStride [IN] Output image stride * * \return None */ 
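// The transpose kernel below stages a TRANSPOSE_TILE_DIM x TRANSPOSE_TILE_DIM tile in shared memory (padded by one column to avoid bank conflicts) and remaps block indices diagonally so that global reads and writes are spread across memory partitions.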
template <class T> __global__ void transpose(T *d_src, Ncv32u srcStride, T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi) { __shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1]; Ncv32u blockIdx_x, blockIdx_y; // do diagonal reordering if (gridDim.x == gridDim.y) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x; } else { Ncv32u bid = blockIdx.x + gridDim.x * blockIdx.y; blockIdx_y = bid % gridDim.y; blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x; } Ncv32u xIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.x; Ncv32u yIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.y; Ncv32u index_gmem = xIndex + yIndex * srcStride; if (xIndex < srcRoi.width) { for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS) { if (yIndex + i < srcRoi.height) { tile[threadIdx.y+i][threadIdx.x] = d_src[index_gmem+i*srcStride]; } } } __syncthreads(); xIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.x; yIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.y; index_gmem = xIndex + yIndex * dstStride; if (xIndex < srcRoi.height) { for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS) { if (yIndex + i < srcRoi.width) { d_dst[index_gmem+i*dstStride] = tile[threadIdx.x][threadIdx.y+i]; } } } } template <class T> NCVStatus transposeWrapperDevice(T *d_src, Ncv32u srcStride, T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi) { ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) && dstStride >= srcRoi.height * sizeof(T) && srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP); srcStride /= sizeof(T); dstStride /= sizeof(T); dim3 grid((srcRoi.width + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM, (srcRoi.height + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM); dim3 block(TRANSPOSE_TILE_DIM, TRANSPOSE_TILE_DIM); transpose <T> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcStride, d_dst, dstStride, srcRoi); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } template <class T> static NCVStatus transposeWrapperHost(T *h_src, Ncv32u srcStride, T *h_dst, Ncv32u dstStride, NcvSize32u srcRoi) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) && dstStride >= srcRoi.height * sizeof(T) && srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP); srcStride /= sizeof(T); dstStride /= sizeof(T); for (Ncv32u i=0; i<srcRoi.height; i++) { for (Ncv32u j=0; j<srcRoi.width; j++) { h_dst[j*dstStride+i] = h_src[i*srcStride + j]; } } return NPPST_SUCCESS; } #define implementNppTranspose(bit, typ) \ NCVStatus nppiStTranspose_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \ Ncv##bit##typ *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) \ { \ return transposeWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \ (Ncv##bit##u *)d_dst, dstStep, srcRoi); \ } #define implementNppTransposeHost(bit, typ) \ NCVStatus nppiStTranspose_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \ Ncv##bit##typ *h_dst, Ncv32u dstStep, \ NcvSize32u srcRoi) \ { \ return transposeWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \ (Ncv##bit##u *)h_dst, dstStep, srcRoi); \ } implementNppTranspose(32,u) implementNppTranspose(32,s) implementNppTranspose(32,f) implementNppTranspose(64,u) 
implementNppTranspose(64,s) implementNppTranspose(64,f) implementNppTransposeHost(32,u) implementNppTransposeHost(32,s) implementNppTransposeHost(32,f) implementNppTransposeHost(64,u) implementNppTransposeHost(64,s) implementNppTransposeHost(64,f) NCVStatus nppiStTranspose_128_C1R(void *d_src, Ncv32u srcStep, void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) { return transposeWrapperDevice<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi); } NCVStatus nppiStTranspose_128_C1R_host(void *d_src, Ncv32u srcStep, void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) { return transposeWrapperHost<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi); } //============================================================================== // // Compact.cu // //============================================================================== const Ncv32u NUM_REMOVE_THREADS = 256; template <bool bRemove, bool bWritePartial> __global__ void removePass1Scan(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_offsets, Ncv32u *d_blockSums, Ncv32u elemRemove) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn > srcLen + blockDim.x) { return; } __shared__ Ncv32u shmem[NUM_REMOVE_THREADS]; Ncv32u scanElem = 0; if (elemAddrIn < srcLen) { if (bRemove) { scanElem = (d_src[elemAddrIn] != elemRemove) ? 1 : 0; } else { scanElem = d_src[elemAddrIn]; } } Ncv32u localScanInc = cv::cudev::blockScanInclusive<NUM_REMOVE_THREADS>(scanElem, shmem, threadIdx.x); __syncthreads(); if (elemAddrIn < srcLen) { if (threadIdx.x == NUM_REMOVE_THREADS-1 && bWritePartial) { d_blockSums[blockId] = localScanInc; } if (bRemove) { d_offsets[elemAddrIn] = localScanInc - scanElem; } else { d_src[elemAddrIn] = localScanInc - scanElem; } } } __global__ void removePass2Adjust(Ncv32u *d_offsets, Ncv32u srcLen, Ncv32u *d_blockSums) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn >= srcLen) { return; } __shared__ Ncv32u valOffs; valOffs = d_blockSums[blockId]; __syncthreads(); d_offsets[elemAddrIn] += valOffs; } __global__ void removePass3Compact(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_offsets, Ncv32u *d_dst, Ncv32u elemRemove, Ncv32u *dstLenValue) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn >= srcLen) { return; } Ncv32u elem = d_src[elemAddrIn]; Ncv32u elemAddrOut = d_offsets[elemAddrIn]; if (elem != elemRemove) { d_dst[elemAddrOut] = elem; } if (elemAddrIn == srcLen-1) { if (elem != elemRemove) { *dstLenValue = elemAddrOut + 1; } else { *dstLenValue = elemAddrOut; } } } NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_dst, Ncv32u *dstLenPinned, Ncv32u elemRemove, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { if (dstLenPinned != NULL) { *dstLenPinned = 0; } return NPPST_SUCCESS; } std::vector<Ncv32u> partSumNums; std::vector<Ncv32u> partSumOffsets; Ncv32u partSumLastNum = srcLen; Ncv32u partSumLastOffs = 0; do { partSumNums.push_back(partSumLastNum); partSumOffsets.push_back(partSumLastOffs); Ncv32u curPartSumAlignedLength = alignUp(partSumLastNum * sizeof(Ncv32u), gpuAllocator.alignment()) / sizeof(Ncv32u); partSumLastOffs += curPartSumAlignedLength; partSumLastNum = (partSumLastNum + 
NUM_REMOVE_THREADS - 1) / NUM_REMOVE_THREADS; } while (partSumLastNum>1); partSumNums.push_back(partSumLastNum); partSumOffsets.push_back(partSumLastOffs); NCVVectorAlloc<Ncv32u> d_hierSums(gpuAllocator, partSumLastOffs+1); ncvAssertReturn(gpuAllocator.isCounting() || d_hierSums.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVVectorAlloc<Ncv32u> d_numDstElements(gpuAllocator, 1); ncvAssertReturn(gpuAllocator.isCounting() || d_numDstElements.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN dim3 block(NUM_REMOVE_THREADS); //calculate zero-level partial sums for indices calculation if (partSumNums.size() > 2) { dim3 grid(partSumNums[1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } removePass1Scan <true, true> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcLen, d_hierSums.ptr(), d_hierSums.ptr() + partSumOffsets[1], elemRemove); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); //calculate hierarchical partial sums for (Ncv32u i=1; i<partSumNums.size()-1; i++) { dim3 grid_partial(partSumNums[i+1]); if (grid_partial.x > 65535) { grid_partial.y = (grid_partial.x + 65534) / 65535; grid_partial.x = 65535; } if (grid_partial.x != 1) { removePass1Scan <false, true> <<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>> (d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], NULL, d_hierSums.ptr() + partSumOffsets[i+1], 0); } else { removePass1Scan <false, false> <<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>> (d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], NULL, NULL, 0); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } //adjust hierarchical partial sums for (Ncv32s i=(Ncv32s)partSumNums.size()-3; i>=0; i--) { dim3 grid_local(partSumNums[i+1]); if (grid_local.x > 65535) { grid_local.y = (grid_local.x + 65534) / 65535; grid_local.x = 65535; } removePass2Adjust <<<grid_local, block, 0, nppStGetActiveCUDAstream()>>> (d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], d_hierSums.ptr() + partSumOffsets[i+1]); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } } else { dim3 grid_local(partSumNums[1]); removePass1Scan <true, false> <<<grid_local, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcLen, d_hierSums.ptr(), NULL, elemRemove); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } //compact source vector using indices dim3 grid(partSumNums[1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } removePass3Compact <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcLen, d_hierSums.ptr(), d_dst, elemRemove, d_numDstElements.ptr()); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); //get number of dst elements if (dstLenPinned != NULL) { ncvAssertCUDAReturn(cudaMemcpyAsync(dstLenPinned, d_numDstElements.ptr(), sizeof(Ncv32u), cudaMemcpyDeviceToHost, nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR); } NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus nppsStCompactGetSize_32u(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { *pBufsize = 0; return NPPST_SUCCESS; } NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = compactVector_32u_device(NULL, srcLen, NULL, 
NULL, 0xC001C0DE, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppsStCompactGetSize_32s(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp) { return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp); } NCVStatus nppsStCompactGetSize_32f(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp) { return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp); } NCVStatus nppsStCompact_32u(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_dst, Ncv32u *p_dstLen, Ncv32u elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = compactVector_32u_device(d_src, srcLen, d_dst, p_dstLen, elemRemove, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppsStCompact_32s(Ncv32s *d_src, Ncv32u srcLen, Ncv32s *d_dst, Ncv32u *p_dstLen, Ncv32s elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen, *(Ncv32u *)&elemRemove, pBuffer, bufSize, devProp); } #if defined __GNUC__ && (__GNUC__*100 + __GNUC_MINOR__ > 204) typedef Ncv32u __attribute__((__may_alias__)) Ncv32u_a; #else typedef Ncv32u Ncv32u_a; #endif NCVStatus nppsStCompact_32f(Ncv32f *d_src, Ncv32u srcLen, Ncv32f *d_dst, Ncv32u *p_dstLen, Ncv32f elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen, *(Ncv32u_a *)&elemRemove, pBuffer, bufSize, devProp); } NCVStatus nppsStCompact_32u_host(Ncv32u *h_src, Ncv32u srcLen, Ncv32u *h_dst, Ncv32u *dstLen, Ncv32u elemRemove) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { if (dstLen != NULL) { *dstLen = 0; } return NPPST_SUCCESS; } Ncv32u dstIndex = 0; for (Ncv32u srcIndex=0; srcIndex<srcLen; srcIndex++) { if (h_src[srcIndex] != elemRemove) { h_dst[dstIndex++] = h_src[srcIndex]; } } if (dstLen != NULL) { *dstLen = dstIndex; } return NPPST_SUCCESS; } NCVStatus nppsStCompact_32s_host(Ncv32s *h_src, Ncv32u srcLen, Ncv32s *h_dst, Ncv32u *dstLen, Ncv32s elemRemove) { return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove); } NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen, Ncv32f *h_dst, Ncv32u *dstLen, Ncv32f elemRemove) { return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove); } //============================================================================== // // Filter.cu // //============================================================================== texture <float, 1, cudaReadModeElementType> texSrc; texture <float, 1, cudaReadModeElementType> texKernel; __forceinline__ __device__ float getValueMirrorRow(const int rowOffset, int i, int w) { if (i < 0) i = 1 - i; if (i >= w) i = w + w - i - 1; return tex1Dfetch (texSrc, rowOffset + i); } __forceinline__ __device__ float getValueMirrorColumn(const int offset, const int rowStep, int j, int h) { if (j < 0) j = 1 - j; if (j >= h) j = h + h - j - 1; return tex1Dfetch (texSrc, offset + j * rowStep); } __global__ void FilterRowBorderMirror_32f_C1R(Ncv32u srcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u roi, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { // position 
within ROI const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix >= roi.width || iy >= roi.height) { return; } const int p = nKernelSize - nAnchor - 1; const int j = roi.y + iy; const int rowOffset = j * srcStep + roi.x; float sum = 0.0f; for (int m = 0; m < nKernelSize; ++m) { sum += getValueMirrorRow (rowOffset, ix + m - p, roi.width) * tex1Dfetch (texKernel, m); } pDst[iy * dstStep + ix] = sum * multiplier; } __global__ void FilterColumnBorderMirror_32f_C1R(Ncv32u srcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u roi, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix >= roi.width || iy >= roi.height) { return; } const int p = nKernelSize - nAnchor - 1; const int i = roi.x + ix; const int offset = i + roi.y * srcStep; float sum = 0.0f; for (int m = 0; m < nKernelSize; ++m) { sum += getValueMirrorColumn (offset, srcStep, iy + m - p, roi.height) * tex1Dfetch (texKernel, m); } pDst[ix + iy * dstStep] = sum * multiplier; } NCVStatus nppiStFilterRowBorder_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u oROI, NppStBorderType borderType, const Ncv32f *pKernel, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR); ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && dstSize.width * sizeof (Ncv32f) <= nDstStep && oROI.width * sizeof (Ncv32f) <= nSrcStep && oROI.width * sizeof (Ncv32f) <= nDstStep && nSrcStep % sizeof (Ncv32f) == 0 && nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // adjust ROI size to be within source image if (oROI.x + oROI.width > srcSize.width) { oROI.width = srcSize.width - oROI.x; } if (oROI.y + oROI.height > srcSize.height) { oROI.height = srcSize.height - oROI.y; } cudaChannelFormatDesc floatChannel = cudaCreateChannelDesc <float> (); texSrc.normalized = false; texKernel.normalized = false; cudaBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep); cudaBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f)); dim3 ctaSize (32, 6); dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x, (oROI.height + ctaSize.y - 1) / ctaSize.y); switch (borderType) { case nppStBorderNone: return NPPST_ERROR; case nppStBorderClamp: return NPPST_ERROR; case nppStBorderWrap: return NPPST_ERROR; case nppStBorderMirror: FilterRowBorderMirror_32f_C1R <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>> (srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); break; default: return NPPST_ERROR; } return NPPST_SUCCESS; } NCVStatus nppiStFilterColumnBorder_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u oROI, NppStBorderType borderType, const Ncv32f *pKernel, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR); ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && dstSize.width * sizeof (Ncv32f) 
<= nDstStep && oROI.width * sizeof (Ncv32f) <= nSrcStep && oROI.width * sizeof (Ncv32f) <= nDstStep && nSrcStep % sizeof (Ncv32f) == 0 && nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // adjust ROI size to be within source image if (oROI.x + oROI.width > srcSize.width) { oROI.width = srcSize.width - oROI.x; } if (oROI.y + oROI.height > srcSize.height) { oROI.height = srcSize.height - oROI.y; } cudaChannelFormatDesc floatChannel = cudaCreateChannelDesc <float> (); texSrc.normalized = false; texKernel.normalized = false; cudaBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep); cudaBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f)); dim3 ctaSize (32, 6); dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x, (oROI.height + ctaSize.y - 1) / ctaSize.y); switch (borderType) { case nppStBorderClamp: return NPPST_ERROR; case nppStBorderWrap: return NPPST_ERROR; case nppStBorderMirror: FilterColumnBorderMirror_32f_C1R <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>> (srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); break; default: return NPPST_ERROR; } return NPPST_SUCCESS; } //============================================================================== // // FrameInterpolate.cu // //============================================================================== inline Ncv32u iDivUp(Ncv32u num, Ncv32u denom) { return (num + denom - 1)/denom; } texture<float, 2, cudaReadModeElementType> tex_src1; texture<float, 2, cudaReadModeElementType> tex_src0; __global__ void BlendFramesKernel(const float *u, const float *v, // forward flow const float *ur, const float *vr, // backward flow const float *o0, const float *o1, // coverage masks int w, int h, int s, float theta, float *out) { const int ix = threadIdx.x + blockDim.x * blockIdx.x; const int iy = threadIdx.y + blockDim.y * blockIdx.y; const int pos = ix + s * iy; if (ix >= w || iy >= h) return; float _u = u[pos]; float _v = v[pos]; float _ur = ur[pos]; float _vr = vr[pos]; float x = (float)ix + 0.5f; float y = (float)iy + 0.5f; bool b0 = o0[pos] > 1e-4f; bool b1 = o1[pos] > 1e-4f; if (b0 && b1) { // pixel is visible on both frames out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta) * (1.0f - theta) + tex2D(tex_src1, x + _u * (1.0f - theta), y + _v * (1.0f - theta)) * theta; } else if (b0) { // visible on the first frame only out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta); } else { // visible on the second frame only out[pos] = tex2D(tex_src1, x - _ur * (1.0f - theta), y - _vr * (1.0f - theta)); } } NCVStatus BlendFrames(const Ncv32f *src0, const Ncv32f *src1, const Ncv32f *ufi, const Ncv32f *vfi, const Ncv32f *ubi, const Ncv32f *vbi, const Ncv32f *o1, const Ncv32f *o2, Ncv32u width, Ncv32u height, Ncv32u stride, Ncv32f theta, Ncv32f *out) { tex_src1.addressMode[0] = cudaAddressModeClamp; tex_src1.addressMode[1] = cudaAddressModeClamp; tex_src1.filterMode = cudaFilterModeLinear; tex_src1.normalized = false; tex_src0.addressMode[0] = cudaAddressModeClamp; tex_src0.addressMode[1] = cudaAddressModeClamp; tex_src0.filterMode = cudaFilterModeLinear; tex_src0.normalized = false; cudaChannelFormatDesc desc = cudaCreateChannelDesc <float> (); const Ncv32u pitch = stride * sizeof (float); ncvAssertCUDAReturn (cudaBindTexture2D (0, tex_src1, src1, desc, width, height, pitch), 
                                          NPPST_TEXTURE_BIND_ERROR);
    ncvAssertCUDAReturn (cudaBindTexture2D (0, tex_src0, src0, desc, width, height, pitch),
                                          NPPST_TEXTURE_BIND_ERROR);

    dim3 threads (32, 4);
    dim3 blocks (iDivUp (width, threads.x), iDivUp (height, threads.y));

    BlendFramesKernel<<<blocks, threads, 0, nppStGetActiveCUDAstream ()>>>
        (ufi, vfi, ubi, vbi, o1, o2, width, height, stride, theta, out);

    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    return NPPST_SUCCESS;
}


NCVStatus nppiStGetInterpolationBufferSize(NcvSize32u srcSize,
                                           Ncv32u nStep,
                                           Ncv32u *hpSize)
{
    NCVStatus status = NPPST_ERROR;
    status = nppiStVectorWarpGetBufferSize(srcSize, nStep, hpSize);
    return status;
}


NCVStatus nppiStInterpolateFrames(const NppStInterpolationState *pState)
{
    // check state validity
    ncvAssertReturn (pState->pSrcFrame0 != 0 &&
        pState->pSrcFrame1 != 0 &&
        pState->pFU != 0 &&
        pState->pFV != 0 &&
        pState->pBU != 0 &&
        pState->pBV != 0 &&
        pState->pNewFrame != 0 &&
        pState->ppBuffers[0] != 0 &&
        pState->ppBuffers[1] != 0 &&
        pState->ppBuffers[2] != 0 &&
        pState->ppBuffers[3] != 0 &&
        pState->ppBuffers[4] != 0 &&
        pState->ppBuffers[5] != 0, NPPST_NULL_POINTER_ERROR);

    ncvAssertReturn (pState->size.width > 0 && pState->size.height > 0, NPPST_ERROR);

    ncvAssertReturn (pState->nStep >= pState->size.width * sizeof (Ncv32f) &&
        pState->nStep > 0 &&
        pState->nStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);

    // change notation
    Ncv32f *cov0 = pState->ppBuffers[0];
    Ncv32f *cov1 = pState->ppBuffers[1];
    Ncv32f *fwdU = pState->ppBuffers[2]; // forward u
    Ncv32f *fwdV = pState->ppBuffers[3]; // forward v
    Ncv32f *bwdU = pState->ppBuffers[4]; // backward u
    Ncv32f *bwdV = pState->ppBuffers[5]; // backward v

    // warp flow
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFU, pState->size, pState->nStep,
                                        pState->pFU, pState->pFV, pState->nStep,
                                        cov0, pState->pos, fwdU) );
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFV, pState->size, pState->nStep,
                                        pState->pFU, pState->pFV, pState->nStep,
                                        cov0, pState->pos, fwdV) );

    // warp backward flow
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBU, pState->size, pState->nStep,
                                        pState->pBU, pState->pBV, pState->nStep,
                                        cov1, 1.0f - pState->pos, bwdU) );
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBV, pState->size, pState->nStep,
                                        pState->pBU, pState->pBV, pState->nStep,
                                        cov1, 1.0f - pState->pos, bwdV) );

    // interpolate frame
    ncvAssertReturnNcvStat (
        BlendFrames (pState->pSrcFrame0, pState->pSrcFrame1,
                     fwdU, fwdV, bwdU, bwdV, cov0, cov1,
                     pState->size.width, pState->size.height,
                     pState->nStep / sizeof (Ncv32f),
                     pState->pos, pState->pNewFrame) );

    return NPPST_SUCCESS;
}


//==============================================================================
//
// VectorWarpFrame.cu
//
//==============================================================================


#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)

// FP32 atomic add
static __forceinline__ __device__ float _atomicAdd(float *addr, float val)
{
    float old = *addr, assumed;
    do {
        assumed = old;
        old = int_as_float(__iAtomicCAS((int*)addr,
                                        float_as_int(assumed),
                                        float_as_int(val+assumed)));
    } while( assumed!=old );
    return old;
}
#else
#define _atomicAdd atomicAdd
#endif


__global__ void ForwardWarpKernel_PSF2x2(const float *u, const float *v, const float *src,
                                         const int w, const int h,
                                         const int flow_stride, const int image_stride,
                                         const float time_scale,
                                         float *normalization_factor, float *dst)
{
    int j = threadIdx.x + blockDim.x * blockIdx.x;
    int i = threadIdx.y + blockDim.y * blockIdx.y;

    if (i
>= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; //bottom left corner of a target pixel float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f; float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f; // pixel containing bottom left corner float px; float py; float dx = modff (cx, &px); float dy = modff (cy, &py); // target pixel integer coords int tx; int ty; tx = (int) px; ty = (int) py; float value = src[image_row_offset + j]; float weight; // fill pixel containing bottom right corner if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * dy; _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing bottom left corner tx -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * dy; _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper left corner ty -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper right corner tx += 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } } __global__ void ForwardWarpKernel_PSF1x1(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *dst) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; if (i >= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; float u_ = u[flow_row_offset + j]; float v_ = v[flow_row_offset + j]; //bottom left corner of target pixel float cx = u_ * time_scale + (float)j + 1.0f; float cy = v_ * time_scale + (float)i + 1.0f; // pixel containing bottom left corner int tx = __float2int_rn (cx); int ty = __float2int_rn (cy); float value = src[image_row_offset + j]; // fill pixel if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { _atomicAdd (dst + ty * image_stride + tx, value); } } __global__ void NormalizeKernel(const float *normalization_factor, int w, int h, int s, float *image) { int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if (i >= h || j >= w) return; const int pos = i * s + j; float scale = normalization_factor[pos]; float invScale = (scale == 0.0f) ? 
1.0f : (1.0f / scale); image[pos] *= invScale; } __global__ void MemsetKernel(const float value, int w, int h, float *image) { int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if (i >= h || j >= w) return; const int pos = i * w + j; image[pos] = value; } NCVStatus nppiStVectorWarpGetBufferSize (NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32u *hpSize) { ncvAssertReturn (hpSize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep, NPPST_INVALID_STEP); *hpSize = nSrcStep * srcSize.height; return NPPST_SUCCESS; } // does not require normalization NCVStatus nppiStVectorWarp_PSF1x1_32f_C1(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, const Ncv32f *pU, const Ncv32f *pV, Ncv32u nVFStep, Ncv32f timeScale, Ncv32f *pDst) { ncvAssertReturn (pSrc != NULL && pU != NULL && pV != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u vfStep = nVFStep / sizeof (Ncv32f); dim3 ctaSize (32, 6); dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y)); ForwardWarpKernel_PSF1x1 <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStVectorWarp_PSF2x2_32f_C1(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, const Ncv32f *pU, const Ncv32f *pV, Ncv32u nVFStep, Ncv32f *pBuffer, Ncv32f timeScale, Ncv32f *pDst) { ncvAssertReturn (pSrc != NULL && pU != NULL && pV != NULL && pDst != NULL && pBuffer != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u vfStep = nVFStep / sizeof(Ncv32f); dim3 ctaSize(32, 6); dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y)); MemsetKernel <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (0, srcSize.width, srcSize.height, pBuffer); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); ForwardWarpKernel_PSF2x2 <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pBuffer, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); NormalizeKernel <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (pBuffer, srcSize.width, srcSize.height, srcStep, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } //============================================================================== // // Resize.cu // //============================================================================== texture <float, 2, cudaReadModeElementType> texSrc2D; __forceinline__ __device__ float processLine(int spos, float xmin, float xmax, int ixmin, int ixmax, float fxmin, float cxmax) { // first element float wsum = 1.0f - xmin + fxmin; float sum = tex1Dfetch(texSrc, spos) * (1.0f - xmin + fxmin); spos++; for (int ix = ixmin + 1; ix < ixmax; ++ix) { sum += tex1Dfetch(texSrc, spos); spos++; wsum += 1.0f; } sum += tex1Dfetch(texSrc, spos) * (cxmax - xmax); wsum += cxmax - xmax; return sum / wsum; } __global__ void resizeSuperSample_32f(NcvSize32u srcSize, Ncv32u srcStep, NcvRect32u srcROI, Ncv32f 
*dst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { // position within dst ROI const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } float rw = (float) srcROI.width; float rh = (float) srcROI.height; // source position float x = scaleX * (float) ix; float y = scaleY * (float) iy; // x sampling range float xBegin = fmax (x - scaleX, 0.0f); float xEnd = fmin (x + scaleX, rw - 1.0f); // y sampling range float yBegin = fmax (y - scaleY, 0.0f); float yEnd = fmin (y + scaleY, rh - 1.0f); // x range of source samples float floorXBegin = floorf (xBegin); float ceilXEnd = ceilf (xEnd); int iXBegin = srcROI.x + (int) floorXBegin; int iXEnd = srcROI.x + (int) ceilXEnd; // y range of source samples float floorYBegin = floorf (yBegin); float ceilYEnd = ceilf (yEnd); int iYBegin = srcROI.y + (int) floorYBegin; int iYEnd = srcROI.y + (int) ceilYEnd; // first row int pos = iYBegin * srcStep + iXBegin; float wsum = 1.0f - yBegin + floorYBegin; float sum = processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (1.0f - yBegin + floorYBegin); pos += srcStep; for (int iy = iYBegin + 1; iy < iYEnd; ++iy) { sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd); pos += srcStep; wsum += 1.0f; } sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (ceilYEnd - yEnd); wsum += ceilYEnd - yEnd; sum /= wsum; dst[(ix + dstROI.x) + (iy + dstROI.y) * dstStep] = sum; } // bicubic interpolation __forceinline__ __device__ float bicubicCoeff(float x_) { float x = fabsf(x_); if (x <= 1.0f) { return x * x * (1.5f * x - 2.5f) + 1.0f; } else if (x < 2.0f) { return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; } else { return 0.0f; } } __global__ void resizeBicubic(NcvSize32u srcSize, NcvRect32u srcROI, NcvSize32u dstSize, Ncv32u dstStep, Ncv32f *dst, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } const float dx = 1.0f / srcROI.width; const float dy = 1.0f / srcROI.height; float rx = (float) srcROI.x; float ry = (float) srcROI.y; float rw = (float) srcROI.width; float rh = (float) srcROI.height; float x = scaleX * (float) ix; float y = scaleY * (float) iy; // sampling range // border mode is clamp float xmin = fmax (ceilf (x - 2.0f), 0.0f); float xmax = fmin (floorf (x + 2.0f), rw - 1.0f); float ymin = fmax (ceilf (y - 2.0f), 0.0f); float ymax = fmin (floorf (y + 2.0f), rh - 1.0f); // shift data window to match ROI rx += 0.5f; ry += 0.5f; x += rx; y += ry; xmin += rx; xmax += rx; ymin += ry; ymax += ry; float sum = 0.0f; float wsum = 0.0f; for (float cy = ymin; cy <= ymax; cy += 1.0f) { for (float cx = xmin; cx <= xmax; cx += 1.0f) { float xDist = x - cx; float yDist = y - cy; float wx = bicubicCoeff (xDist); float wy = bicubicCoeff (yDist); wx *= wy; sum += wx * tex2D (texSrc2D, cx * dx, cy * dy); wsum += wx; } } dst[(ix + dstROI.x)+ (iy + dstROI.y) * dstStep] = (!wsum)? 
0 : sum / wsum; } NCVStatus nppiStResize_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, NcvRect32u srcROI, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u dstROI, Ncv32f xFactor, Ncv32f yFactor, NppStInterpMode interpolation) { NCVStatus status = NPPST_SUCCESS; ncvAssertReturn (pSrc != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (xFactor != 0.0 && yFactor != 0.0, NPPST_INVALID_SCALE); ncvAssertReturn (nSrcStep >= sizeof (Ncv32f) * (Ncv32u) srcSize.width && nDstStep >= sizeof (Ncv32f) * (Ncv32f) dstSize.width, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // TODO: preprocess ROI to prevent out of bounds access if (interpolation == nppStSupersample) { // bind texture cudaBindTexture (0, texSrc, pSrc, srcSize.height * nSrcStep); // invoke kernel dim3 ctaSize (32, 6); dim3 gridSize ((dstROI.width + ctaSize.x - 1) / ctaSize.x, (dstROI.height + ctaSize.y - 1) / ctaSize.y); resizeSuperSample_32f <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>> (srcSize, srcStep, srcROI, pDst, dstSize, dstStep, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else if (interpolation == nppStBicubic) { texSrc2D.addressMode[0] = cudaAddressModeMirror; texSrc2D.addressMode[1] = cudaAddressModeMirror; texSrc2D.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc <float> (); cudaBindTexture2D (0, texSrc2D, pSrc, desc, srcSize.width, srcSize.height, nSrcStep); dim3 ctaSize (32, 6); dim3 gridSize ((dstSize.width + ctaSize.x - 1) / ctaSize.x, (dstSize.height + ctaSize.y - 1) / ctaSize.y); resizeBicubic <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>> (srcSize, srcROI, dstSize, dstStep, pDst, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else { status = NPPST_ERROR; } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return status; } #endif /* CUDA_DISABLER */
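

//==============================================================================
//
// Editor-added usage sketch (hedged, illustrative only)
//
//==============================================================================

// The compaction interface defined earlier in this file is a two-phase API:
// nppsStCompactGetSize_32u reports how large the scratch buffer for the
// hierarchical scan must be, and nppsStCompact_32u performs the actual stream
// compaction with a caller-provided buffer. The wrapper below is a minimal
// sketch of that calling pattern; the wrapper name, its parameters, and the
// CUDA_DISABLER guard placement are assumptions made for illustration and are
// not part of the original sources.
#if !defined CUDA_DISABLER
static NCVStatus exampleCompact_32u(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_dst,
                                    Ncv32u elemRemove, Ncv32u *hp_dstLen,
                                    cudaDeviceProp &devProp)
{
    // Phase 1: query the scratch-buffer size for this input length.
    Ncv32u bufSize = 0;
    NCVStatus ncvStat = nppsStCompactGetSize_32u(srcLen, &bufSize, devProp);
    ncvAssertReturnNcvStat(ncvStat);

    // Phase 2: run the compaction. hp_dstLen receives the number of kept
    // elements (pinned host memory is preferable, since it is written with an
    // asynchronous copy that is synchronized before the call returns).
    Ncv8u *d_scratch = NULL;
    ncvAssertCUDAReturn(cudaMalloc((void **)&d_scratch, bufSize), NPPST_MEM_INTERNAL_ERROR);

    ncvStat = nppsStCompact_32u(d_src, srcLen, d_dst, hp_dstLen, elemRemove,
                                d_scratch, bufSize, devProp);

    cudaFree(d_scratch);
    return ncvStat;
}
#endif /* CUDA_DISABLER */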
d29ef1f7db35207f8b6d4fcbd212063554b9c5c5.hip
// !!! This is a file automatically generated by hipify!!!
#include <sentinel.h>
#include "fileutils.cuh"
#include "dcat.cuh"
#include "dchgrp.cuh"
#include "dchgrp_getgrnam.cuh"
#include "dchmod.cuh"
#include "dchown.cuh"
#include "dchown_getpwnam_hip.cuh"
#include "dcmp_hip.cuh"
#include "dcp.cuh"
#include "dcp_isadir.cuh"
#include "dgrep.cuh"
#include "dls.cuh"
#include "dmkdir.cuh"
#include "dmore.cuh"
#include "dmv.cuh"
#include "drm.cuh"
#include "drmdir.cuh"
#include "dpwd_hip.cuh"
#include "dcd.cuh"

extern "C" bool sentinelFileUtilsExecutor(void *tag, sentinelMessage *data, int length, char *(**hostPrepare)(void*, char*, char*, intptr_t));
static sentinelExecutor _fileUtilsExecutor = { nullptr, "fileutils", sentinelFileUtilsExecutor, nullptr };
void sentinelRegisterFileUtils() { sentinelRegisterExecutor(&_fileUtilsExecutor, false, false); }
d29ef1f7db35207f8b6d4fcbd212063554b9c5c5.cu
#include <sentinel.h>
#include "fileutils.cuh"
#include "dcat.cuh"
#include "dchgrp.cuh"
#include "dchgrp_getgrnam.cuh"
#include "dchmod.cuh"
#include "dchown.cuh"
#include "dchown_getpwnam.cuh"
#include "dcmp.cuh"
#include "dcp.cuh"
#include "dcp_isadir.cuh"
#include "dgrep.cuh"
#include "dls.cuh"
#include "dmkdir.cuh"
#include "dmore.cuh"
#include "dmv.cuh"
#include "drm.cuh"
#include "drmdir.cuh"
#include "dpwd.cuh"
#include "dcd.cuh"

extern "C" bool sentinelFileUtilsExecutor(void *tag, sentinelMessage *data, int length, char *(**hostPrepare)(void*, char*, char*, intptr_t));
static sentinelExecutor _fileUtilsExecutor = { nullptr, "fileutils", sentinelFileUtilsExecutor, nullptr };
void sentinelRegisterFileUtils() { sentinelRegisterExecutor(&_fileUtilsExecutor, false, false); }
9252d1e43a25b1d94669941a1a101cc1204aeb99.hip
// !!! This is a file automatically generated by hipify!!! /* N-body code for CS 4380 / CS 5351 Copyright (c) 2019 Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is *not* permitted. Use in source and binary forms, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Martin Burtscher */ #include <cstdlib> #include <cstdio> #include <cmath> #include <sys/time.h> #include "cs43805351.h" #include <hip/hip_runtime.h> static const int ThreadsPerBlock = 512; struct Data { // mass, 3D position, 3D velocity, 3D acceleration float mass, posx, posy, posz, velx, vely, velz, accx, accy, accz; }; static void outputBMP(const int nbodies, const Data* const data, const int step) { const int WIDTH = 512; unsigned char* bmp = new unsigned char [WIDTH * WIDTH]; for (int i = 0; i < WIDTH * WIDTH; i++) bmp[i] = 0; for (int i = 0; i < nbodies; i++) { const float fz = data[i].posz + 3.0f; if (fz > 0) { const float fx = data[i].posx; const float fy = data[i].posy; const float dsqr = fx * fx + fy * fy + fz * fz; const int x = atanf(fx / fz) * (WIDTH / 2) + (0.5f + WIDTH / 2); const int y = atanf(fy / fz) * (WIDTH / 2) + (0.5f + WIDTH / 2); int c = 140 - dsqr * 4.0f; if (c < 100) c = 100; if ((0 <= x) && (x < WIDTH) && (0 <= y) && (y < WIDTH)) { if (c > bmp[x + y * WIDTH]) bmp[x + y * WIDTH] = c; } } } char name[32]; sprintf(name, "nbody%d.bmp", step + 1000); writeBMP(WIDTH, WIDTH, bmp, name); delete [] bmp; } /******************************************************************************/ /*** generate input (based on SPLASH2) ****************************************/ /******************************************************************************/ static const int MASK = 0x7FFFFFFF; static int randx = 1; static double drnd() { const int lastrand = randx; randx = (1103515245 * randx + 12345) & MASK; return lastrand / 2147483648.0; } static void generateInput(const int nbodies, Data* const data) { const double rsc = 0.5890486225481; const double vsc = sqrt(1.0 / rsc); for (int i = 0; i < nbodies; i++) { data[i].mass = 1.0 / nbodies; const double r = 1.0 / sqrt(pow(drnd() * 0.999, -2.0 / 3.0) - 1); double x, y, z, sq; do { x = drnd() * 2.0 - 1.0; y = drnd() * 2.0 - 1.0; z = drnd() * 2.0 - 1.0; sq = x * x + y * y + z * z; } while (sq > 1.0); double scale = rsc * r / sqrt(sq); data[i].posx = x * scale; data[i].posy = y * scale; data[i].posz = z * scale; do { x = drnd(); y = drnd() * 0.1; } while (y > x * x * pow(1 - x * x, 3.5)); const double v = x * sqrt(2.0 / sqrt(1 + r * r)); do { x = drnd() * 2.0 - 1.0; y = drnd() * 2.0 - 1.0; z = drnd() * 2.0 - 1.0; sq = x * x + y * y + z * z; } while (sq > 1.0); scale = vsc * v / sqrt(sq); data[i].velx = 
x * scale; data[i].vely = y * scale; data[i].velz = z * scale; } for (int i = 0; i < nbodies; i++) { data[i].accx = 0; data[i].accy = 0; data[i].accz = 0; } } /******************************************************************************/ /*** compute force ************************************************************/ /******************************************************************************/ static __global__ void calculateForceKernel(const int nbodies, Data* const data, const int step, const float dthf) { const float epssq = 0.05f * 0.05f; int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < nbodies) { const float px = data[idx].posx; const float py = data[idx].posy; const float pz = data[idx].posz; float ax = 0; float ay = 0; float az = 0; for (int j = 0; j < nbodies; j++) { const float dx = data[j].posx - px; const float dy = data[j].posy - py; const float dz = data[j].posz - pz; float tmp = dx * dx + dy * dy + dz * dz; tmp = 1.0f / sqrtf(tmp + epssq); tmp = data[j].mass * tmp * tmp * tmp; ax += dx * tmp; ay += dy * tmp; az += dz * tmp; } if (step > 0) { data[idx].velx += (ax - data[idx].accx) * dthf; data[idx].vely += (ay - data[idx].accy) * dthf; data[idx].velz += (az - data[idx].accz) * dthf; } data[idx].accx = ax; data[idx].accy = ay; data[idx].accz = az; } } /******************************************************************************/ /*** advance bodies ***********************************************************/ /******************************************************************************/ static __global__ void integrateKernel(const int nbodies, Data* const data, const float dthf) { const float dtime = dthf + dthf; int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < nbodies) { const float dvelx = data[idx].accx * dthf; const float dvely = data[idx].accy * dthf; const float dvelz = data[idx].accz * dthf; const float velhx = data[idx].velx + dvelx; const float velhy = data[idx].vely + dvely; const float velhz = data[idx].velz + dvelz; data[idx].posx += velhx * dtime; data[idx].posy += velhy * dtime; data[idx].posz += velhz * dtime; data[idx].velx = velhx + dvelx; data[idx].vely = velhy + dvely; data[idx].velz = velhz + dvelz; } } static void CheckCuda() { hipError_t e; hipDeviceSynchronize(); if(hipSuccess != (e = hipGetLastError())){ fprintf(stderr, "CUDA error %d: %s\n", e, hipGetErrorString(e)); exit(-1); } } /******************************************************************************/ int main(int argc, char *argv[]) { printf("N-body v1.1\n"); // check command line if (argc != 4) {fprintf(stderr, "USAGE: %s number_of_bodies number_of_timesteps generate_images\n", argv[0]); exit(-1);} const int nbodies = atoi(argv[1]); if (nbodies < 10) {fprintf(stderr, "ERROR: number_of_bodies must be at least 10\n"); exit(-1);} const int timesteps = atoi(argv[2]); if (timesteps < 1) {fprintf(stderr, "ERROR: number_of_timesteps must be at least 1\n"); exit(-1);} const int genimages = atoi(argv[3]); if ((genimages != 0) && (genimages != 1)) {fprintf(stderr, "ERROR: generate_images must be either 0 or 1\n"); exit(-1);} printf("bodies: %d\n", nbodies); printf("time steps: %d\n", timesteps); printf("images: %s\n", genimages ? 
"yes" : "no"); // allocate and initialize data Data* data = new Data [nbodies]; generateInput(nbodies, data); Data *d_data; const int size = nbodies * sizeof(Data); hipMalloc((void **)&d_data, size); //input to device if(hipSuccess != hipMemcpy(d_data, data, size, hipMemcpyHostToDevice)){fprintf(stderr, "copy to device failed"); exit(-1);} // start time timeval start, end; gettimeofday(&start, NULL); // compute result for each time step const float dthf = 0.025f * 0.5f; for (int step = 0; step < timesteps; step++) { hipLaunchKernelGGL(( calculateForceKernel), dim3((nbodies + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, nbodies, d_data, step, dthf); hipLaunchKernelGGL(( integrateKernel), dim3((nbodies + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, nbodies, d_data, dthf); // write result to BMP file if (genimages){ if(hipSuccess != hipMemcpy(data, d_data, size, hipMemcpyHostToDevice)){fprintf(stderr, "copy to host failed"); exit(-1);} outputBMP(nbodies, data, step); } } hipDeviceSynchronize(); // end time gettimeofday(&end, NULL); const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0; printf("compute time: %.4f s\n", runtime); CheckCuda(); hipFree(d_data); delete [] data; return 0; }
9252d1e43a25b1d94669941a1a101cc1204aeb99.cu
/* N-body code for CS 4380 / CS 5351 Copyright (c) 2019 Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is *not* permitted. Use in source and binary forms, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Martin Burtscher */ #include <cstdlib> #include <cstdio> #include <cmath> #include <sys/time.h> #include "cs43805351.h" #include <cuda.h> static const int ThreadsPerBlock = 512; struct Data { // mass, 3D position, 3D velocity, 3D acceleration float mass, posx, posy, posz, velx, vely, velz, accx, accy, accz; }; static void outputBMP(const int nbodies, const Data* const data, const int step) { const int WIDTH = 512; unsigned char* bmp = new unsigned char [WIDTH * WIDTH]; for (int i = 0; i < WIDTH * WIDTH; i++) bmp[i] = 0; for (int i = 0; i < nbodies; i++) { const float fz = data[i].posz + 3.0f; if (fz > 0) { const float fx = data[i].posx; const float fy = data[i].posy; const float dsqr = fx * fx + fy * fy + fz * fz; const int x = atanf(fx / fz) * (WIDTH / 2) + (0.5f + WIDTH / 2); const int y = atanf(fy / fz) * (WIDTH / 2) + (0.5f + WIDTH / 2); int c = 140 - dsqr * 4.0f; if (c < 100) c = 100; if ((0 <= x) && (x < WIDTH) && (0 <= y) && (y < WIDTH)) { if (c > bmp[x + y * WIDTH]) bmp[x + y * WIDTH] = c; } } } char name[32]; sprintf(name, "nbody%d.bmp", step + 1000); writeBMP(WIDTH, WIDTH, bmp, name); delete [] bmp; } /******************************************************************************/ /*** generate input (based on SPLASH2) ****************************************/ /******************************************************************************/ static const int MASK = 0x7FFFFFFF; static int randx = 1; static double drnd() { const int lastrand = randx; randx = (1103515245 * randx + 12345) & MASK; return lastrand / 2147483648.0; } static void generateInput(const int nbodies, Data* const data) { const double rsc = 0.5890486225481; const double vsc = sqrt(1.0 / rsc); for (int i = 0; i < nbodies; i++) { data[i].mass = 1.0 / nbodies; const double r = 1.0 / sqrt(pow(drnd() * 0.999, -2.0 / 3.0) - 1); double x, y, z, sq; do { x = drnd() * 2.0 - 1.0; y = drnd() * 2.0 - 1.0; z = drnd() * 2.0 - 1.0; sq = x * x + y * y + z * z; } while (sq > 1.0); double scale = rsc * r / sqrt(sq); data[i].posx = x * scale; data[i].posy = y * scale; data[i].posz = z * scale; do { x = drnd(); y = drnd() * 0.1; } while (y > x * x * pow(1 - x * x, 3.5)); const double v = x * sqrt(2.0 / sqrt(1 + r * r)); do { x = drnd() * 2.0 - 1.0; y = drnd() * 2.0 - 1.0; z = drnd() * 2.0 - 1.0; sq = x * x + y * y + z * z; } while (sq > 1.0); scale = vsc * v / sqrt(sq); data[i].velx = x * scale; data[i].vely = y * scale; data[i].velz = z * scale; } for 
(int i = 0; i < nbodies; i++) { data[i].accx = 0; data[i].accy = 0; data[i].accz = 0; } } /******************************************************************************/ /*** compute force ************************************************************/ /******************************************************************************/ static __global__ void calculateForceKernel(const int nbodies, Data* const data, const int step, const float dthf) { const float epssq = 0.05f * 0.05f; int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < nbodies) { const float px = data[idx].posx; const float py = data[idx].posy; const float pz = data[idx].posz; float ax = 0; float ay = 0; float az = 0; for (int j = 0; j < nbodies; j++) { const float dx = data[j].posx - px; const float dy = data[j].posy - py; const float dz = data[j].posz - pz; float tmp = dx * dx + dy * dy + dz * dz; tmp = 1.0f / sqrtf(tmp + epssq); tmp = data[j].mass * tmp * tmp * tmp; ax += dx * tmp; ay += dy * tmp; az += dz * tmp; } if (step > 0) { data[idx].velx += (ax - data[idx].accx) * dthf; data[idx].vely += (ay - data[idx].accy) * dthf; data[idx].velz += (az - data[idx].accz) * dthf; } data[idx].accx = ax; data[idx].accy = ay; data[idx].accz = az; } } /******************************************************************************/ /*** advance bodies ***********************************************************/ /******************************************************************************/ static __global__ void integrateKernel(const int nbodies, Data* const data, const float dthf) { const float dtime = dthf + dthf; int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < nbodies) { const float dvelx = data[idx].accx * dthf; const float dvely = data[idx].accy * dthf; const float dvelz = data[idx].accz * dthf; const float velhx = data[idx].velx + dvelx; const float velhy = data[idx].vely + dvely; const float velhz = data[idx].velz + dvelz; data[idx].posx += velhx * dtime; data[idx].posy += velhy * dtime; data[idx].posz += velhz * dtime; data[idx].velx = velhx + dvelx; data[idx].vely = velhy + dvely; data[idx].velz = velhz + dvelz; } } static void CheckCuda() { cudaError_t e; cudaDeviceSynchronize(); if(cudaSuccess != (e = cudaGetLastError())){ fprintf(stderr, "CUDA error %d: %s\n", e, cudaGetErrorString(e)); exit(-1); } } /******************************************************************************/ int main(int argc, char *argv[]) { printf("N-body v1.1\n"); // check command line if (argc != 4) {fprintf(stderr, "USAGE: %s number_of_bodies number_of_timesteps generate_images\n", argv[0]); exit(-1);} const int nbodies = atoi(argv[1]); if (nbodies < 10) {fprintf(stderr, "ERROR: number_of_bodies must be at least 10\n"); exit(-1);} const int timesteps = atoi(argv[2]); if (timesteps < 1) {fprintf(stderr, "ERROR: number_of_timesteps must be at least 1\n"); exit(-1);} const int genimages = atoi(argv[3]); if ((genimages != 0) && (genimages != 1)) {fprintf(stderr, "ERROR: generate_images must be either 0 or 1\n"); exit(-1);} printf("bodies: %d\n", nbodies); printf("time steps: %d\n", timesteps); printf("images: %s\n", genimages ? 
"yes" : "no"); // allocate and initialize data Data* data = new Data [nbodies]; generateInput(nbodies, data); Data *d_data; const int size = nbodies * sizeof(Data); cudaMalloc((void **)&d_data, size); //input to device if(cudaSuccess != cudaMemcpy(d_data, data, size, cudaMemcpyHostToDevice)){fprintf(stderr, "copy to device failed"); exit(-1);} // start time timeval start, end; gettimeofday(&start, NULL); // compute result for each time step const float dthf = 0.025f * 0.5f; for (int step = 0; step < timesteps; step++) { calculateForceKernel<<<(nbodies + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(nbodies, d_data, step, dthf); integrateKernel<<<(nbodies + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(nbodies, d_data, dthf); // write result to BMP file if (genimages){ if(cudaSuccess != cudaMemcpy(data, d_data, size, cudaMemcpyHostToDevice)){fprintf(stderr, "copy to host failed"); exit(-1);} outputBMP(nbodies, data, step); } } cudaDeviceSynchronize(); // end time gettimeofday(&end, NULL); const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0; printf("compute time: %.4f s\n", runtime); CheckCuda(); cudaFree(d_data); delete [] data; return 0; }
69fea8b8e4d7160b7e55d7c3d112f34ecb3daa6e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "uplo_acos.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int sd = 1; const int unit = 1; const int bottom = 1; const REAL *a = NULL; hipMalloc(&a, XSIZE*YSIZE); const int offset_a = 1; const int ld_a = 1; REAL *b = NULL; hipMalloc(&b, XSIZE*YSIZE); const int offset_b = 1; const int ld_b = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( uplo_acos), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( uplo_acos), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( uplo_acos), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
69fea8b8e4d7160b7e55d7c3d112f34ecb3daa6e.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "uplo_acos.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int sd = 1; const int unit = 1; const int bottom = 1; const REAL *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); const int offset_a = 1; const int ld_a = 1; REAL *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); const int offset_b = 1; const int ld_b = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); uplo_acos<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { uplo_acos<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { uplo_acos<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
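

// Editor-added note (hedged): kernel launches are asynchronous, so the
// steady_clock interval measured in main mostly reflects enqueue time unless
// the launch queue back-pressures; synchronizing before reading the end time,
// or using CUDA events as sketched below, bounds the actual device work. The
// helper name and its wiring are assumptions for illustration only and are not
// part of the generated benchmark above.
float timeKernelLoopMs(dim3 gridBlock, dim3 threadBlock,
                       const int sd, const int unit, const int bottom,
                       const REAL *a, const int offset_a, const int ld_a,
                       REAL *b, const int offset_b, const int ld_b, const int iters)
{
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);                  // marks the point all prior work has reached
  for (int it = 0; it < iters; ++it) {
    uplo_acos<<<gridBlock, threadBlock>>>(sd, unit, bottom, a, offset_a, ld_a, b, offset_b, ld_b);
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);              // wait until the last launched kernel has finished

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);  // total device time for 'iters' launches
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;
}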
0fdcbb0161ff975c94b7b8c6ddda0d01c174f227.hip
// !!! This is a file automatically generated by hipify!!! /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2019, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/solver/upper_trs_kernels.hpp" #include <memory> #include <hip/hip_runtime.h> #include <hipsparse.h> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/solver/upper_trs.hpp> #include "core/solver/upper_trs_kernels.hpp" #include "cuda/base/cusparse_bindings.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/solver/common_trs_kernels.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The UPPER_TRS solver namespace. 
* * @ingroup upper_trs */ namespace upper_trs { void should_perform_transpose(std::shared_ptr<const CudaExecutor> exec, bool &do_transpose) { should_perform_transpose_kernel(exec, do_transpose); } void init_struct(std::shared_ptr<const CudaExecutor> exec, std::shared_ptr<solver::SolveStruct> &solve_struct) { init_struct_kernel(exec, solve_struct); } template <typename ValueType, typename IndexType> void generate(std::shared_ptr<const CudaExecutor> exec, const matrix::Csr<ValueType, IndexType> *matrix, solver::SolveStruct *solve_struct, const gko::size_type num_rhs) { generate_kernel<ValueType, IndexType>(exec, matrix, solve_struct, num_rhs, true); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_UPPER_TRS_GENERATE_KERNEL); template <typename ValueType, typename IndexType> void solve(std::shared_ptr<const CudaExecutor> exec, const matrix::Csr<ValueType, IndexType> *matrix, const solver::SolveStruct *solve_struct, matrix::Dense<ValueType> *trans_b, matrix::Dense<ValueType> *trans_x, const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *x) { solve_kernel<ValueType, IndexType>(exec, matrix, solve_struct, trans_b, trans_x, b, x); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_UPPER_TRS_SOLVE_KERNEL); } // namespace upper_trs } // namespace cuda } // namespace kernels } // namespace gko
0fdcbb0161ff975c94b7b8c6ddda0d01c174f227.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2019, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/solver/upper_trs_kernels.hpp" #include <memory> #include <cuda.h> #include <cusparse.h> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/solver/upper_trs.hpp> #include "core/solver/upper_trs_kernels.hpp" #include "cuda/base/cusparse_bindings.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/solver/common_trs_kernels.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The UPPER_TRS solver namespace. * * @ingroup upper_trs */ namespace upper_trs { void should_perform_transpose(std::shared_ptr<const CudaExecutor> exec, bool &do_transpose) { should_perform_transpose_kernel(exec, do_transpose); } void init_struct(std::shared_ptr<const CudaExecutor> exec, std::shared_ptr<solver::SolveStruct> &solve_struct) { init_struct_kernel(exec, solve_struct); } template <typename ValueType, typename IndexType> void generate(std::shared_ptr<const CudaExecutor> exec, const matrix::Csr<ValueType, IndexType> *matrix, solver::SolveStruct *solve_struct, const gko::size_type num_rhs) { generate_kernel<ValueType, IndexType>(exec, matrix, solve_struct, num_rhs, true); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_UPPER_TRS_GENERATE_KERNEL); template <typename ValueType, typename IndexType> void solve(std::shared_ptr<const CudaExecutor> exec, const matrix::Csr<ValueType, IndexType> *matrix, const solver::SolveStruct *solve_struct, matrix::Dense<ValueType> *trans_b, matrix::Dense<ValueType> *trans_x, const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *x) { solve_kernel<ValueType, IndexType>(exec, matrix, solve_struct, trans_b, trans_x, b, x); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_UPPER_TRS_SOLVE_KERNEL); } // namespace upper_trs } // namespace cuda } // namespace kernels } // namespace gko
3a2002cdb5b27332b43b50ba17f675a66f9adec8.hip
// !!! This is a file automatically generated by hipify!!!
#include "CUAPI.h"

#if ( defined GPU && defined GRAVITY )

extern real   (*d_Rho_Array_P    )[ RHO_NXT*RHO_NXT*RHO_NXT ];
extern real   (*d_Pot_Array_P_In )[ POT_NXT*POT_NXT*POT_NXT ];
extern real   (*d_Pot_Array_P_Out)[ GRA_NXT*GRA_NXT*GRA_NXT ];
#ifdef UNSPLIT_GRAVITY
extern real   (*d_Pot_Array_USG_G)[ USG_NXT_G*USG_NXT_G*USG_NXT_G ];
extern real   (*d_Flu_Array_USG_G)[GRA_NIN-1][ PS1*PS1*PS1 ];
#endif
extern real   (*d_Flu_Array_G    )[GRA_NIN  ][ PS1*PS1*PS1 ];
extern double (*d_Corner_Array_G )[3];
#ifdef DUAL_ENERGY
extern char   (*d_DE_Array_G     )[ PS1*PS1*PS1 ];
#endif
extern real   (*d_Pot_Array_T    )[ CUBE(GRA_NXT) ];

//-------------------------------------------------------------------------------------------------------
// Function    :  CUAPI_MemAllocate_PoissonGravity
// Description :  Allocate device and host memory for the Poisson and Gravity solvers
//
// Parameter   :  Pot_NPG : Number of patch groups evaluated simultaneously by GPU
//-------------------------------------------------------------------------------------------------------
void CUAPI_MemAllocate_PoissonGravity( const int Pot_NPG )
{
   const long Pot_NP            = 8*Pot_NPG;
   const long Rho_MemSize_P     = sizeof(real  )*Pot_NP*CUBE(RHO_NXT);
   const long Pot_MemSize_P_In  = sizeof(real  )*Pot_NP*CUBE(POT_NXT);
   const long Pot_MemSize_P_Out = sizeof(real  )*Pot_NP*CUBE(GRA_NXT);
#  ifdef UNSPLIT_GRAVITY
   const long Pot_MemSize_USG_G = sizeof(real  )*Pot_NP*CUBE(USG_NXT_G);
   const long Flu_MemSize_USG_G = sizeof(real  )*Pot_NP*CUBE(PS1)*(GRA_NIN-1);
#  endif
   const long Flu_MemSize_G     = sizeof(real  )*Pot_NP*CUBE(PS1)*(GRA_NIN  );
   const long Corner_MemSize    = sizeof(double)*Pot_NP*3;
#  ifdef DUAL_ENERGY
   const long DE_MemSize_G      = sizeof(char  )*Pot_NP*CUBE(PS1);
#  endif
   const long Pot_MemSize_T     = sizeof(real  )*Pot_NP*CUBE(GRA_NXT);

// output the total memory requirement
   long TotalSize = Rho_MemSize_P + Pot_MemSize_P_In + Pot_MemSize_P_Out + Flu_MemSize_G + Pot_MemSize_T;
#  ifdef UNSPLIT_GRAVITY
   TotalSize += Pot_MemSize_USG_G + Flu_MemSize_USG_G;
#  endif
#  ifdef DUAL_ENERGY
   TotalSize += DE_MemSize_G;
#  endif

   if ( MPI_Rank == 0 )
      Aux_Message( stdout, "NOTE : total memory requirement in GPU Poisson and gravity solver = %ld MB\n",
                   TotalSize/(1<<20) );

// allocate the device memory
   CUDA_CHECK_ERROR(  hipMalloc( (void**) &d_Rho_Array_P,     Rho_MemSize_P     )  );
   CUDA_CHECK_ERROR(  hipMalloc( (void**) &d_Pot_Array_P_In,  Pot_MemSize_P_In  )  );
   CUDA_CHECK_ERROR(  hipMalloc( (void**) &d_Pot_Array_P_Out, Pot_MemSize_P_Out )  );
#  ifdef UNSPLIT_GRAVITY
   CUDA_CHECK_ERROR(  hipMalloc( (void**) &d_Pot_Array_USG_G, Pot_MemSize_USG_G )  );
   CUDA_CHECK_ERROR(  hipMalloc( (void**) &d_Flu_Array_USG_G, Flu_MemSize_USG_G )  );
#  endif
   CUDA_CHECK_ERROR(  hipMalloc( (void**) &d_Flu_Array_G,     Flu_MemSize_G     )  );

   if ( OPT__GRAVITY_TYPE == GRAVITY_EXTERNAL || OPT__GRAVITY_TYPE == GRAVITY_BOTH || OPT__EXTERNAL_POT )
      CUDA_CHECK_ERROR(  hipMalloc( (void**) &d_Corner_Array_G, Corner_MemSize )  );
#  ifdef DUAL_ENERGY
   CUDA_CHECK_ERROR(  hipMalloc( (void**) &d_DE_Array_G,      DE_MemSize_G      )  );
#  endif
   CUDA_CHECK_ERROR(  hipMalloc( (void**) &d_Pot_Array_T,     Pot_MemSize_T     )  );

// allocate the host memory by CUDA
   for (int t=0; t<2; t++)
   {
      CUDA_CHECK_ERROR(  hipHostMalloc( (void**) &h_Rho_Array_P    [t], Rho_MemSize_P     )  );
      CUDA_CHECK_ERROR(  hipHostMalloc( (void**) &h_Pot_Array_P_In [t], Pot_MemSize_P_In  )  );
      CUDA_CHECK_ERROR(  hipHostMalloc( (void**) &h_Pot_Array_P_Out[t], Pot_MemSize_P_Out )  );
#     ifdef UNSPLIT_GRAVITY
      CUDA_CHECK_ERROR(  hipHostMalloc( (void**) &h_Pot_Array_USG_G[t], Pot_MemSize_USG_G )  );
      CUDA_CHECK_ERROR(  hipHostMalloc( (void**) &h_Flu_Array_USG_G[t], Flu_MemSize_USG_G )  );
#     endif
      CUDA_CHECK_ERROR(  hipHostMalloc( (void**) &h_Flu_Array_G    [t], Flu_MemSize_G     )  );

      if ( OPT__GRAVITY_TYPE == GRAVITY_EXTERNAL || OPT__GRAVITY_TYPE == GRAVITY_BOTH || OPT__EXTERNAL_POT )
         CUDA_CHECK_ERROR(  hipHostMalloc( (void**) &h_Corner_Array_G [t], Corner_MemSize )  );
#     ifdef DUAL_ENERGY
      CUDA_CHECK_ERROR(  hipHostMalloc( (void**) &h_DE_Array_G     [t], DE_MemSize_G      )  );
#     endif
      CUDA_CHECK_ERROR(  hipHostMalloc( (void**) &h_Pot_Array_T    [t], Pot_MemSize_T     )  );
   } // for (int t=0; t<2; t++)

} // FUNCTION : CUAPI_MemAllocate_PoissonGravity

#endif // #if ( defined GPU && defined GRAVITY )
3a2002cdb5b27332b43b50ba17f675a66f9adec8.cu
#include "CUAPI.h" #if ( defined GPU && defined GRAVITY ) extern real (*d_Rho_Array_P )[ RHO_NXT*RHO_NXT*RHO_NXT ]; extern real (*d_Pot_Array_P_In )[ POT_NXT*POT_NXT*POT_NXT ]; extern real (*d_Pot_Array_P_Out)[ GRA_NXT*GRA_NXT*GRA_NXT ]; #ifdef UNSPLIT_GRAVITY extern real (*d_Pot_Array_USG_G)[ USG_NXT_G*USG_NXT_G*USG_NXT_G ]; extern real (*d_Flu_Array_USG_G)[GRA_NIN-1][ PS1*PS1*PS1 ]; #endif extern real (*d_Flu_Array_G )[GRA_NIN ][ PS1*PS1*PS1 ]; extern double (*d_Corner_Array_G)[3]; #ifdef DUAL_ENERGY extern char (*d_DE_Array_G )[ PS1*PS1*PS1 ]; #endif extern real (*d_Pot_Array_T) [ CUBE(GRA_NXT) ]; //------------------------------------------------------------------------------------------------------- // Function : CUAPI_MemAllocate_PoissonGravity // Description : Allocate device and host memory for the Poisson and Gravity solvers // // Parameter : Pot_NPG : Number of patch groups evaluated simultaneously by GPU //------------------------------------------------------------------------------------------------------- void CUAPI_MemAllocate_PoissonGravity( const int Pot_NPG ) { const long Pot_NP = 8*Pot_NPG; const long Rho_MemSize_P = sizeof(real )*Pot_NP*CUBE(RHO_NXT); const long Pot_MemSize_P_In = sizeof(real )*Pot_NP*CUBE(POT_NXT); const long Pot_MemSize_P_Out = sizeof(real )*Pot_NP*CUBE(GRA_NXT); # ifdef UNSPLIT_GRAVITY const long Pot_MemSize_USG_G = sizeof(real )*Pot_NP*CUBE(USG_NXT_G); const long Flu_MemSize_USG_G = sizeof(real )*Pot_NP*CUBE(PS1)*(GRA_NIN-1); # endif const long Flu_MemSize_G = sizeof(real )*Pot_NP*CUBE(PS1)*(GRA_NIN ); const long Corner_MemSize = sizeof(double)*Pot_NP*3; # ifdef DUAL_ENERGY const long DE_MemSize_G = sizeof(char )*Pot_NP*CUBE(PS1); # endif const long Pot_MemSize_T = sizeof(real )*Pot_NP*CUBE(GRA_NXT); // output the total memory requirement long TotalSize = Rho_MemSize_P + Pot_MemSize_P_In + Pot_MemSize_P_Out + Flu_MemSize_G + Pot_MemSize_T; # ifdef UNSPLIT_GRAVITY TotalSize += Pot_MemSize_USG_G + Flu_MemSize_USG_G; # endif # ifdef DUAL_ENERGY TotalSize += DE_MemSize_G; # endif if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : total memory requirement in GPU Poisson and gravity solver = %ld MB\n", TotalSize/(1<<20) ); // allocate the device memory CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Rho_Array_P, Rho_MemSize_P ) ); CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Pot_Array_P_In, Pot_MemSize_P_In ) ); CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Pot_Array_P_Out, Pot_MemSize_P_Out ) ); # ifdef UNSPLIT_GRAVITY CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Pot_Array_USG_G, Pot_MemSize_USG_G ) ); CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Flu_Array_USG_G, Flu_MemSize_USG_G ) ); # endif CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Flu_Array_G, Flu_MemSize_G ) ); if ( OPT__GRAVITY_TYPE == GRAVITY_EXTERNAL || OPT__GRAVITY_TYPE == GRAVITY_BOTH || OPT__EXTERNAL_POT ) CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Corner_Array_G, Corner_MemSize ) ); # ifdef DUAL_ENERGY CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_DE_Array_G, DE_MemSize_G ) ); # endif CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Pot_Array_T, Pot_MemSize_T ) ); // allocate the host memory by CUDA for (int t=0; t<2; t++) { CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Rho_Array_P [t], Rho_MemSize_P ) ); CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Pot_Array_P_In [t], Pot_MemSize_P_In ) ); CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Pot_Array_P_Out[t], Pot_MemSize_P_Out ) ); # ifdef UNSPLIT_GRAVITY CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Pot_Array_USG_G[t], Pot_MemSize_USG_G ) ); CUDA_CHECK_ERROR( cudaMallocHost( 
(void**) &h_Flu_Array_USG_G[t], Flu_MemSize_USG_G ) ); # endif CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Flu_Array_G [t], Flu_MemSize_G ) ); if ( OPT__GRAVITY_TYPE == GRAVITY_EXTERNAL || OPT__GRAVITY_TYPE == GRAVITY_BOTH || OPT__EXTERNAL_POT ) CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Corner_Array_G [t], Corner_MemSize ) ); # ifdef DUAL_ENERGY CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_DE_Array_G [t], DE_MemSize_G ) ); # endif CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Pot_Array_T [t], Pot_MemSize_T ) ); } // for (int t=0; t<2; t++) } // FUNCTION : CUAPI_MemAllocate_PoissonGravity #endif // #if ( defined GPU && defined GRAVITY )
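The pair above shows the two hipify substitutions that matter for this allocation routine: cudaMalloc becomes hipMalloc and cudaMallocHost (page-locked host memory) becomes hipHostMalloc, while the project's CUDA_CHECK_ERROR wrapper is left untouched. Below is a minimal, self-contained sketch of the same allocate/check/free pattern against the HIP runtime; the HIP_CHECK macro and buffer names are illustrative, not taken from the project.

// Minimal sketch (not project code): device + pinned-host allocation with error checking.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

#define HIP_CHECK(call)                                                            \
   do {                                                                            \
      hipError_t e_ = (call);                                                      \
      if (e_ != hipSuccess) {                                                      \
         fprintf(stderr, "%s:%d %s\n", __FILE__, __LINE__, hipGetErrorString(e_)); \
         exit(EXIT_FAILURE);                                                       \
      }                                                                            \
   } while (0)

int main()
{
   const long N = 1 << 20;
   float *d_buf = nullptr;   // device memory      : hipMalloc     <-> cudaMalloc
   float *h_buf = nullptr;   // pinned host memory : hipHostMalloc <-> cudaMallocHost

   HIP_CHECK( hipMalloc    ( (void**)&d_buf, N*sizeof(float) ) );
   HIP_CHECK( hipHostMalloc( (void**)&h_buf, N*sizeof(float) ) );

   HIP_CHECK( hipHostFree( h_buf ) );
   HIP_CHECK( hipFree    ( d_buf ) );
   return 0;
}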
986a3e5fd5341d6278434f167e9be9933de7f865.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <sys/time.h>
#include "dproductkernel.h"

int main(int argc, char* argv[])
{
    /* Initialize variables */
    FILE *fp;
    size_t size;
    unsigned int rows = atoi(argv[3]);
    unsigned int cols = atoi(argv[4]);
    int CUDA_DEVICE = atoi(argv[5]);
    int THREADS = atoi(argv[6]);
    //printf("Rows: %d\nCols: %d\nDEVICE: %d\nTHREADS: %d\n", rows, cols, CUDA_DEVICE, THREADS);

    /* Declare and Malloc Host Variables */
    size = (size_t)((size_t)rows * (size_t)cols);
    //printf("Size of data: %u\n", size);
    int BLOCKS;
    float *dataT = (float*)malloc((size_t)size * sizeof(float));
    float *dataV = (float*)malloc((size_t)cols * sizeof(float));
    float *host_results = (float*)malloc((size_t)rows * sizeof(float));
    unsigned int jobs;

    /* Declare Kernel Variables */
    float *dev_dataT;
    float *dev_dataV;
    float *results;

    /* Store data from files to host variables */
    fp = fopen(argv[1], "r");
    if (fp == NULL) {
        printf("Cannot Open the File");
        return 0;
    }
    if(dataT == NULL) {
        printf("ERROR: Memory for data not allocated.\n");
    }
    for(int i = 0; i < rows; i++) {
        for(int j = 0; j < cols; j++) {
            fscanf(fp, "%f", &dataT[(i * cols) + j]);
        }
    }
    fclose(fp);
    //printf("Data Read Successfully!!!\n");

    fp = fopen(argv[2], "r");
    if (fp == NULL) {
        printf("Cannot Open the File");
        return 0;
    }
    if(dataV == NULL) {
        printf("ERROR: Memory for data not allocated.\n");
    }
    for (int i = 0; i < cols; i++) {
        fscanf(fp, "%f", &dataV[i]);
    }
    fclose(fp);
    //printf("W Vector Read Successfully\n");
    fflush(stdout);

    /* hipMalloc Kernel Variables */
    hipError_t err = hipSetDevice(CUDA_DEVICE);
    if(err != hipSuccess) {
        printf("Error setting CUDA DEVICE\n");
        exit(EXIT_FAILURE);
    }
    err = hipMalloc((float**) &dev_dataT, (size_t) size * (size_t) sizeof(float));
    if(err != hipSuccess) {
        printf("Error mallocing data on GPU device\n");
    }
    err = hipMalloc((float**) &dev_dataV, (size_t) cols * (size_t) sizeof(float));
    if(err != hipSuccess) {
        printf("Error mallocing data on GPU device\n");
    }
    err = hipMalloc((float**) &results, (size_t) rows * sizeof(float));
    if(err != hipSuccess) {
        printf("Error mallocing results on GPU device\n");
    }

    /* Copy Host Variables to Kernel Variables */
    err = hipMemcpy(dev_dataT, dataT, (size_t)size * (size_t)sizeof(float), hipMemcpyHostToDevice);
    if(err != hipSuccess) {
        printf("Error copying data to GPU\n");
    }
    err = hipMemcpy(dev_dataV, dataV, (size_t)cols * (size_t)sizeof(float), hipMemcpyHostToDevice);
    if(err != hipSuccess) {
        printf("Error copying data to GPU\n");
    }

    jobs = rows;
    BLOCKS = (jobs + THREADS - 1)/THREADS;
    fflush(stdout);

    /* Kernel Function Call */
    hipLaunchKernelGGL(( dproductkernel), dim3(BLOCKS),dim3(THREADS), 0, 0, rows,cols,dev_dataT,dev_dataV,results);

    /* Copy the results back to Host Memory */
    err = hipMemcpy(host_results, results, rows * sizeof(float), hipMemcpyDeviceToHost);
    if(err != hipSuccess) {
        printf("Error copying data from GPU\n");
    }

    /* Print the results */
    for(int k = 0; k < jobs; k++) {
        printf("%f \n", host_results[k]);
    }
    printf("\n");

    /* Free Cuda Memory */
    hipFree( dev_dataT );
    hipFree( dev_dataV );
    hipFree( results );

    return 0;
}
986a3e5fd5341d6278434f167e9be9933de7f865.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <time.h>
#include <sys/time.h>
#include "dproductkernel.h"

int main(int argc, char* argv[])
{
    /* Initialize variables */
    FILE *fp;
    size_t size;
    unsigned int rows = atoi(argv[3]);
    unsigned int cols = atoi(argv[4]);
    int CUDA_DEVICE = atoi(argv[5]);
    int THREADS = atoi(argv[6]);
    //printf("Rows: %d\nCols: %d\nDEVICE: %d\nTHREADS: %d\n", rows, cols, CUDA_DEVICE, THREADS);

    /* Declare and Malloc Host Variables */
    size = (size_t)((size_t)rows * (size_t)cols);
    //printf("Size of data: %u\n", size);
    int BLOCKS;
    float *dataT = (float*)malloc((size_t)size * sizeof(float));
    float *dataV = (float*)malloc((size_t)cols * sizeof(float));
    float *host_results = (float*)malloc((size_t)rows * sizeof(float));
    unsigned int jobs;

    /* Declare Kernel Variables */
    float *dev_dataT;
    float *dev_dataV;
    float *results;

    /* Store data from files to host variables */
    fp = fopen(argv[1], "r");
    if (fp == NULL) {
        printf("Cannot Open the File");
        return 0;
    }
    if(dataT == NULL) {
        printf("ERROR: Memory for data not allocated.\n");
    }
    for(int i = 0; i < rows; i++) {
        for(int j = 0; j < cols; j++) {
            fscanf(fp, "%f", &dataT[(i * cols) + j]);
        }
    }
    fclose(fp);
    //printf("Data Read Successfully!!!\n");

    fp = fopen(argv[2], "r");
    if (fp == NULL) {
        printf("Cannot Open the File");
        return 0;
    }
    if(dataV == NULL) {
        printf("ERROR: Memory for data not allocated.\n");
    }
    for (int i = 0; i < cols; i++) {
        fscanf(fp, "%f", &dataV[i]);
    }
    fclose(fp);
    //printf("W Vector Read Successfully\n");
    fflush(stdout);

    /* cudaMalloc Kernel Variables */
    cudaError err = cudaSetDevice(CUDA_DEVICE);
    if(err != cudaSuccess) {
        printf("Error setting CUDA DEVICE\n");
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((float**) &dev_dataT, (size_t) size * (size_t) sizeof(float));
    if(err != cudaSuccess) {
        printf("Error mallocing data on GPU device\n");
    }
    err = cudaMalloc((float**) &dev_dataV, (size_t) cols * (size_t) sizeof(float));
    if(err != cudaSuccess) {
        printf("Error mallocing data on GPU device\n");
    }
    err = cudaMalloc((float**) &results, (size_t) rows * sizeof(float));
    if(err != cudaSuccess) {
        printf("Error mallocing results on GPU device\n");
    }

    /* Copy Host Variables to Kernel Variables */
    err = cudaMemcpy(dev_dataT, dataT, (size_t)size * (size_t)sizeof(float), cudaMemcpyHostToDevice);
    if(err != cudaSuccess) {
        printf("Error copying data to GPU\n");
    }
    err = cudaMemcpy(dev_dataV, dataV, (size_t)cols * (size_t)sizeof(float), cudaMemcpyHostToDevice);
    if(err != cudaSuccess) {
        printf("Error copying data to GPU\n");
    }

    jobs = rows;
    BLOCKS = (jobs + THREADS - 1)/THREADS;
    fflush(stdout);

    /* Kernel Function Call */
    dproductkernel<<<BLOCKS,THREADS>>>(rows,cols,dev_dataT,dev_dataV,results);

    /* Copy the results back to Host Memory */
    err = cudaMemcpy(host_results, results, rows * sizeof(float), cudaMemcpyDeviceToHost);
    if(err != cudaSuccess) {
        printf("Error copying data from GPU\n");
    }

    /* Print the results */
    for(int k = 0; k < jobs; k++) {
        printf("%f \n", host_results[k]);
    }
    printf("\n");

    /* Free Cuda Memory */
    cudaFree( dev_dataT );
    cudaFree( dev_dataV );
    cudaFree( results );

    return 0;
}
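In this pair the only structural change made by hipify is the launch syntax: dproductkernel<<<BLOCKS,THREADS>>>(...) becomes hipLaunchKernelGGL((dproductkernel), dim3(BLOCKS), dim3(THREADS), 0, 0, ...), with BLOCKS = (jobs + THREADS - 1)/THREADS rounding up so that every row gets a thread. The kernel itself is declared in dproductkernel.h, which is not part of this listing, so the following is only a hypothetical sketch of a row-wise matrix-vector dot-product kernel matching that launch shape; the real signature and body may differ.

// Hypothetical sketch (dproductkernel.h is not shown above): one thread per matrix row.
__global__ void dproductkernel_sketch(unsigned int rows, unsigned int cols,
                                      const float *mat, const float *vec, float *out)
{
    unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;   // global row index
    if (row >= rows) return;                                    // guard the rounded-up grid

    float acc = 0.0f;
    for (unsigned int j = 0; j < cols; ++j)
        acc += mat[(size_t)row * cols + j] * vec[j];            // dot(mat[row, :], vec)
    out[row] = acc;
}

// Launched with the same grid sizing as in main():
//   BLOCKS = (rows + THREADS - 1) / THREADS;   // ceil(rows / THREADS)
//   hipLaunchKernelGGL((dproductkernel_sketch), dim3(BLOCKS), dim3(THREADS), 0, 0,
//                      rows, cols, dev_dataT, dev_dataV, results);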
f920ac8b460ac4d55a85b0a0af4a041f298eff52.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 1993-2010 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation.
 * Any use, reproduction, disclosure, or distribution of this software
 * and related documentation without an express license agreement from
 * NVIDIA Corporation is strictly prohibited.
 *
 * Please refer to the applicable NVIDIA end user license agreement (EULA)
 * associated with this source code for terms and conditions that govern
 * your use of this NVIDIA software.
 *
 */

#include "../../../../common/book.h"

#define imin(a,b) (a<b?a:b)

const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin( 32, (N+threadsPerBlock-1) / threadsPerBlock );

// (x1,x2,x3,x4)*(y1,y2,y3,y4) = x1y1 + x2y2 + x3y3 + x4y4;
__global__ void dot( float *a, float *b, float *c ) {
    __shared__ float cache[threadsPerBlock];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;

    float temp = 0;
    while (tid < N) {
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }

    // set the cache values
    cache[cacheIndex] = temp;

    // synchronize threads in this block
    __syncthreads();

    // 8
    // i = 4
    // for reductions, threadsPerBlock must be a power of 2
    // because of the following code
    int i = blockDim.x/2;
    while (i != 0) {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads();
        i /= 2;
    }

    if (cacheIndex == 0)
        c[blockIdx.x] = cache[0];
}

int main( void ) {
    float *a, *b, c, *partial_c;
    float *dev_a, *dev_b, *dev_partial_c;

    // allocate memory on the cpu side
    a = (float*)malloc( N*sizeof(float) );
    b = (float*)malloc( N*sizeof(float) );
    partial_c = (float*)malloc( blocksPerGrid*sizeof(float) );

    // allocate the memory on the GPU
    HANDLE_ERROR( hipMalloc( (void**)&dev_a, N*sizeof(float) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_b, N*sizeof(float) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_partial_c, blocksPerGrid*sizeof(float) ) );

    // fill in the host memory with data
    for (int i=0; i<N; i++) {
        a[i] = i;
        b[i] = i*2;
    }

    // copy the arrays 'a' and 'b' to the GPU
    HANDLE_ERROR( hipMemcpy( dev_a, a, N*sizeof(float), hipMemcpyHostToDevice ) );
    HANDLE_ERROR( hipMemcpy( dev_b, b, N*sizeof(float), hipMemcpyHostToDevice ) );

    hipLaunchKernelGGL(( dot), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_c );

    // copy the array 'c' back from the GPU to the CPU
    HANDLE_ERROR( hipMemcpy( partial_c, dev_partial_c, blocksPerGrid*sizeof(float), hipMemcpyDeviceToHost ) );

    // finish up on the CPU side
    c = 0;
    for (int i=0; i<blocksPerGrid; i++) {
        c += partial_c[i];
    }

    #define sum_squares(x) (x*(x+1)*(2*x+1)/6)
    printf( "Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares( (float)(N - 1) ) );

    // free memory on the gpu side
    HANDLE_ERROR( hipFree( dev_a ) );
    HANDLE_ERROR( hipFree( dev_b ) );
    HANDLE_ERROR( hipFree( dev_partial_c ) );

    // free memory on the cpu side
    free( a );
    free( b );
    free( partial_c );
}
f920ac8b460ac4d55a85b0a0af4a041f298eff52.cu
/*
 * Copyright 1993-2010 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation.
 * Any use, reproduction, disclosure, or distribution of this software
 * and related documentation without an express license agreement from
 * NVIDIA Corporation is strictly prohibited.
 *
 * Please refer to the applicable NVIDIA end user license agreement (EULA)
 * associated with this source code for terms and conditions that govern
 * your use of this NVIDIA software.
 *
 */

#include "../../../../common/book.h"

#define imin(a,b) (a<b?a:b)

const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin( 32, (N+threadsPerBlock-1) / threadsPerBlock );

// (x1,x2,x3,x4)*(y1,y2,y3,y4) = x1y1 + x2y2 + x3y3 + x4y4;
__global__ void dot( float *a, float *b, float *c ) {
    __shared__ float cache[threadsPerBlock];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;

    float temp = 0;
    while (tid < N) {
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }

    // set the cache values
    cache[cacheIndex] = temp;

    // synchronize threads in this block
    __syncthreads();

    // 8
    // i = 4
    // for reductions, threadsPerBlock must be a power of 2
    // because of the following code
    int i = blockDim.x/2;
    while (i != 0) {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads();
        i /= 2;
    }

    if (cacheIndex == 0)
        c[blockIdx.x] = cache[0];
}

int main( void ) {
    float *a, *b, c, *partial_c;
    float *dev_a, *dev_b, *dev_partial_c;

    // allocate memory on the cpu side
    a = (float*)malloc( N*sizeof(float) );
    b = (float*)malloc( N*sizeof(float) );
    partial_c = (float*)malloc( blocksPerGrid*sizeof(float) );

    // allocate the memory on the GPU
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N*sizeof(float) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N*sizeof(float) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_partial_c, blocksPerGrid*sizeof(float) ) );

    // fill in the host memory with data
    for (int i=0; i<N; i++) {
        a[i] = i;
        b[i] = i*2;
    }

    // copy the arrays 'a' and 'b' to the GPU
    HANDLE_ERROR( cudaMemcpy( dev_a, a, N*sizeof(float), cudaMemcpyHostToDevice ) );
    HANDLE_ERROR( cudaMemcpy( dev_b, b, N*sizeof(float), cudaMemcpyHostToDevice ) );

    dot<<<blocksPerGrid,threadsPerBlock>>>( dev_a, dev_b, dev_partial_c );

    // copy the array 'c' back from the GPU to the CPU
    HANDLE_ERROR( cudaMemcpy( partial_c, dev_partial_c, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost ) );

    // finish up on the CPU side
    c = 0;
    for (int i=0; i<blocksPerGrid; i++) {
        c += partial_c[i];
    }

    #define sum_squares(x) (x*(x+1)*(2*x+1)/6)
    printf( "Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares( (float)(N - 1) ) );

    // free memory on the gpu side
    HANDLE_ERROR( cudaFree( dev_a ) );
    HANDLE_ERROR( cudaFree( dev_b ) );
    HANDLE_ERROR( cudaFree( dev_partial_c ) );

    // free memory on the cpu side
    free( a );
    free( b );
    free( partial_c );
}
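The final printf in both files compares the GPU result against a closed form: with a[i] = i and b[i] = 2*i, the dot product is 2 * sum of i^2 over i = 0..N-1, which is exactly what 2 * sum_squares((float)(N-1)) computes. A tiny host-only sketch of that arithmetic (exact in integers, whereas the program prints it through float, so small rounding differences are expected):

// Host-side sketch of the expected value: dot(a,b) = (N-1)*N*(2N-1)/3 for a[i]=i, b[i]=2i.
#include <cstdio>

int main()
{
    const long long N = 33 * 1024;
    const long long expected = (N - 1) * N * (2 * N - 1) / 3;   // 2 * sum of squares up to N-1
    std::printf("expected dot product = %lld\n", expected);
    return 0;
}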
e996f2411eb6135b432ef6c3d2c6e4478f0db599.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matrix_viz.h"

#include <stdio.h>

namespace dart {

// -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=-
__global__ void gpu_visualizeMatrix(const float * mxData, const int mxCols, const int mxRows,
                                    uchar3 * img, const int width, const int height,
                                    const uchar3 zeroColor, const float minVal, const float maxVal) {

    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    const int y = blockIdx.y*blockDim.y + threadIdx.y;

    if (x >= width || y >= height) {
        return;
    }

    int mxCol = (float)mxCols*x/width;
    int mxRow = (float)mxRows*y/height;

    mxCol = min(max(0,mxCol),mxCols-1);
    mxRow = min(max(0,mxRow),mxRows-1);

    // printf("%d,%d\n",mxRow,mxCol);

    const float val = mxData[mxRow*mxCols + mxCol];
    //const int val = 0.0f;

    if (val == 0.0f) {
        img[x + y*width] = zeroColor;
        return;
    }

    float a = min(max(0.0f,(val - minVal)/(val - maxVal)),1.0f);
    img[x + y*width] = make_uchar3(255*a,255*a,255*a);
}

// -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=-
void visualizeMatrix(const float * mxData, const int mxCols, const int mxRows,
                     uchar3 * img, const int width, const int height,
                     const uchar3 zeroColor, const float minVal, const float maxVal) {

    dim3 block(16,8,1);
    dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));

    hipLaunchKernelGGL(( gpu_visualizeMatrix), dim3(grid),dim3(block), 0, 0, mxData,mxCols,mxRows,img,width,height,zeroColor,minVal,maxVal);
}

}
e996f2411eb6135b432ef6c3d2c6e4478f0db599.cu
#include "matrix_viz.h" #include <stdio.h> namespace dart { // -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=- __global__ void gpu_visualizeMatrix( const float * mxData, const int mxCols, const int mxRows, uchar3 * img, const int width, const int height, const uchar3 zeroColor, const float minVal, const float maxVal) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int mxCol = (float)mxCols*x/width; int mxRow = (float)mxRows*y/height; mxCol = min(max(0,mxCol),mxCols-1); mxRow = min(max(0,mxRow),mxRows-1); // printf("%d,%d\n",mxRow,mxCol); const float val = mxData[mxRow*mxCols + mxCol]; //const int val = 0.0f; if (val == 0.0f) { img[x + y*width] = zeroColor; return; } float a = min(max(0.0f,(val - minVal)/(val - maxVal)),1.0f); img[x + y*width] = make_uchar3(255*a,255*a,255*a); } // -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=- void visualizeMatrix(const float * mxData, const int mxCols, const int mxRows, uchar3 * img, const int width, const int height, const uchar3 zeroColor, const float minVal, const float maxVal) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_visualizeMatrix<<<grid,block>>>(mxData,mxCols,mxRows,img,width,height,zeroColor,minVal,maxVal); } }
2108d2ad11009a3bebdc0ddf64761bbe5d6ba528.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Lucas Tata / Salvatore Amico High Performance Computing on GPUs Final Project Collaborative Filtering GPU Implementation */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <math.h> #include <limits.h> #include "../helper/util.h" #include "../helper/wtime.h" #define INPUT_SIZE 16000 //how many lines to read from the dataset #define SPARSE_SIZE 8000 //size of sparse matrix is sparse_size * sparse_size #define USER_SIZE 2048 #define ARTIST_SIZE 8192 #define LINE_SIZE 1024 #define RAND_RANGE 100 //sets the random number generation range #define NUM_RECOMMENDATIONS 5 //number of recommendations to generate for the user #define NUM_FEATURES 10 //number of features to generate for each user in the algorithm #define ITERATIONS 1 //how many iterations you want to run the algorithm with #define USER_ID 1 //indicates which user you want to generate recommendations for #define SHARED_SIZE 100 #define SPLIT 25 int *dataMatrix, *X, *Y, *X_T, *Y_T; //our output sparse matrix (users by artists, data is the play count) char **artists; char **users; char **artistNames; int endOfArtistIndex = 0; //keep tabs on how many artists are currently in there int endOfUserIndex = 0; //keep tabs on how many users are currently in there __global__ void gpu_als(int *x, int *user_row, int *user_pref, int * conf_I, int *conf, int num) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < num) { for(int k = 0; k < NUM_FEATURES; k++) { user_row[tid * NUM_FEATURES + k] = x[tid*NUM_FEATURES + k]; if(user_row[tid * NUM_FEATURES + k] != 0) { user_pref[tid*NUM_FEATURES + k] = 1; } else { user_pref[tid*NUM_FEATURES + k] = user_row[tid*NUM_FEATURES + k]; } conf_I[tid*NUM_FEATURES * k * NUM_FEATURES + k] = user_row[tid * NUM_FEATURES + k]; conf[tid*NUM_FEATURES * k * NUM_FEATURES + k] = user_row[tid * NUM_FEATURES + k] + 1; } tid+= blockDim.x * gridDim.x; } } __global__ void gpu_mat_mat_multiply(int *a, int *b, int *c, int num_rows, int num_cols, int num_rows2) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < num_rows*num_rows2) { int res = 0; int row = tid / num_rows; int col = tid % num_rows; for(int j = 0; j < num_cols; j++) { res += a[row * num_cols + j] * b[col + num_rows2 * j]; } c[tid] = res; tid+= blockDim.x * gridDim.x; } } __global__ void gpu_mat_mat_multiply_shared(int *a, int *b, int *c, int num_rows, int num_cols, int num_rows2) { __shared__ int sres[NUM_FEATURES]; int bid = blockIdx.x; while(bid < num_rows*num_rows2) { int row = bid / num_cols; int col = bid % num_cols; int tid = threadIdx.x; while(tid < num_cols) { sres[tid] = a[row * num_cols + tid] * b[col + num_rows2 * tid]; tid += blockDim.x; } __syncthreads(); for(int i = num_cols/2 ; i > 0; i /= 2) { if(threadIdx.x < i) { int temp = sres[threadIdx.x] + sres[threadIdx.x + i]; sres[threadIdx.x] = temp; } __syncthreads(); } if(threadIdx.x == 0) c[bid] = sres[threadIdx.x]; bid += gridDim.x; __syncthreads(); } } __global__ void gpu_mat_mat_multiply_atomic(int *a, int *b, int *c, int num_rows, int num_cols, int num_rows2) { int res; int bid = blockIdx.x; int tid = threadIdx.x; while(bid * SPLIT < num_rows*num_cols) { int row = bid / num_cols + tid / SPLIT; int col = bid % num_cols; res = a[row * num_cols + tid%SPLIT] * b[col + num_rows2 * tid%SPLIT]; __syncthreads(); atomicAdd(&c[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } } __global__ void gpu_mat_vec_multiply_shared(int *mat, int *vec, int *res, 
int num_rows, int num_cols) { __shared__ int svec[256]; __shared__ int sres[256]; svec[threadIdx.x] = vec[threadIdx.x]; int bid = blockIdx.x; __syncthreads(); while (bid < num_rows) { sres[threadIdx.x] = mat[bid * num_cols + threadIdx.x] * svec[threadIdx.x]; __syncthreads(); for(int i = blockDim.x/2 ; i > 0; i /= 2) { if(threadIdx.x < i) { int temp = sres[threadIdx.x] + sres[threadIdx.x + i]; sres[threadIdx.x] = temp; } __syncthreads(); } if(threadIdx.x == 0) res[bid] = sres[threadIdx.x]; bid += 128; __syncthreads(); } } __global__ void gpu_matrix_transpose(int *mat, int *res, int num_rows, int num_cols) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < num_rows*num_cols) { int in_row = tid / num_cols; int in_col = tid % num_cols; int out_row = in_col; int out_col = in_row; res [out_row * num_cols + out_col] = mat [in_row * num_cols + in_col]; tid += blockDim.x * gridDim.x; } } __global__ void gpu_matrix_alpha(int *mat, int *res, float alpha_val, int num_rows, int num_cols) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < num_rows*num_cols) { int in_row = tid / num_cols; int in_col = tid % num_cols; int out_row = in_col; int out_col = in_row; res [out_row * num_cols + out_col] = mat [in_row * num_cols + in_col]; tid += blockDim.x * gridDim.x; } } __global__ void gpu_matrix_addition(int *mat1, int *mat2, int *res, int num_rows, int num_cols) { int tid = threadIdx.x; while(tid < num_rows*num_cols) { int row = tid / num_cols; int col = tid % num_cols; res [row * num_cols + col] += mat1[row * num_cols + col] + mat2[row * num_cols + col]; tid += blockDim.x * gridDim.x; } } __global__ void gpu_mat_div(int *mat1, int *mat2, int *res, int num_rows, int num_cols) { int tid = threadIdx.x; while(tid < num_rows*num_cols) { int row = tid / num_cols; int col = tid % num_cols; res [row * num_cols + col] = mat1[row * num_cols + col] / mat2[row * num_cols + col]; tid += blockDim.x * gridDim.x; } } int checkIfArtistExistsInData(char * artist) { int i; for(i = 0; i < ARTIST_SIZE; i++) { if(strcmp(artist, artists[i]) == 0) { return i; } } return -1; } int checkIfUserExistsInData(char * user) { int i; for(i = 0; i < USER_SIZE; i++) { if(strcmp(user, users[i]) == 0) { return i; } } return -1; } void mat_mat_multiply(int *mat1, int *mat2, int *res, int num_rows1, int num_cols, int num_rows2) { for(int k = 0; k < num_rows1; k ++) { for(int i = 0; i < num_rows2; i ++) { int temp_res = 0; for (int j = 0; j < num_cols; j ++) { temp_res += mat1[i * num_cols + j] * mat2[k + num_rows2 * j]; } res[k+i*num_rows1] = temp_res; } } } void mat_vec_multiply(int *mat, int *vec, int *res, int num_rows, int num_cols) { for(int i = 0; i < num_rows; i ++) { int temp_res = 0; for (int j = 0; j < num_cols; j ++) { temp_res += mat[i * num_cols + j] * vec[j]; } res[i] = temp_res; } } __global__ void implicit_als_gpu(int *data, int *X, int *Y, int *X_T, int *Y_T, int *X_P, int *Y_P, int *X_I, int *Y_I, int *I, int *I1, int *user_row, int *artist_row, int *user_pref, int *artist_pref, int *user_confidence, int *artist_confidence, int *user_confidence_I, int *artist_confidence_I, int *X_temp, int *Y_temp, int *Y_result_y, int *Y_result_pu, int *Y_temp_2, int *X_result_x, int *X_result_pi, int *X_temp_2, int alpha_val, int iterations, double lambda_val, int features, int endOfArtistIndex, int endOfUserIndex) { int tid = threadIdx.x; while(tid < endOfArtistIndex*endOfUserIndex) { int i = tid / endOfUserIndex; int j = tid % endOfUserIndex; data[i * SPARSE_SIZE + j] = data[i * SPARSE_SIZE + j] * alpha_val; tid += 
blockDim.x * gridDim.x; } tid = threadIdx.x; while(tid < endOfUserIndex*features) { int i = tid / features; int j = tid % features; int in_row = tid / features; int in_col = tid % features; int out_row = in_col; int out_col = in_row; X_T [out_row * features + out_col] = X [in_row * features + in_col]; tid += blockDim.x * gridDim.x; } tid = threadIdx.x; while(tid < endOfArtistIndex*features) { int i = tid / endOfArtistIndex; int j = tid % endOfArtistIndex; int in_row = tid / features; int in_col = tid % features; int out_row = in_col; int out_col = in_row; Y_T [out_row * features + out_col] = Y [in_row * features + in_col]; tid += blockDim.x * gridDim.x; } tid = threadIdx.x; while(tid < endOfUserIndex) { X_I[tid * endOfUserIndex + tid] = 1; tid += blockDim.x * gridDim.x; } tid = threadIdx.x; while(tid < endOfArtistIndex) { Y_I[tid * endOfArtistIndex + tid] = 1; I1[tid * endOfArtistIndex + tid] = lambda_val; tid += blockDim.x * gridDim.x; } tid = threadIdx.x; while(tid < features) { I[tid * features + tid] = 1; tid += blockDim.x * gridDim.x; } tid = threadIdx.x; int res; int bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfUserIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = X[row * features + tid%SPLIT] * X_T[col + endOfUserIndex * tid%SPLIT]; __syncthreads(); atomicAdd(&X_P[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfArtistIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = Y[row * features + tid%SPLIT] * Y_T[col + endOfArtistIndex * tid%SPLIT]; __syncthreads(); atomicAdd(&Y_P[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < endOfUserIndex) { for(int k = 0; k < NUM_FEATURES; k++) { user_row[tid * NUM_FEATURES + k] = X[tid*NUM_FEATURES + k]; if(user_row[tid * NUM_FEATURES + k] != 0) { user_pref[tid*NUM_FEATURES + k] = 1; } else { user_pref[tid*NUM_FEATURES + k] = user_row[tid*NUM_FEATURES + k]; } user_confidence_I[tid*NUM_FEATURES * k * NUM_FEATURES + k] = user_row[tid * NUM_FEATURES + k]; user_confidence[tid*NUM_FEATURES * k * NUM_FEATURES + k] = user_row[tid * NUM_FEATURES + k] + 1; } tid+= blockDim.x * gridDim.x; } tid = threadIdx.x; for(int i = 0; i < iterations; i++) { for(int j = 0; j < endOfUserIndex; j++) { int res; int bid = blockIdx.x; int tid = threadIdx.x; while(bid * SPLIT < endOfArtistIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = Y_T[row * features + tid%SPLIT] * user_confidence_I[col + features * tid%SPLIT]; __syncthreads(); atomicAdd(&Y_temp[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfArtistIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = Y_temp[row * features + tid%SPLIT] * Y[col + endOfArtistIndex * tid%SPLIT]; __syncthreads(); atomicAdd(&Y_result_y[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } tid = threadIdx.x; while(tid < endOfArtistIndex*endOfArtistIndex) { int row = tid / endOfArtistIndex; int col = tid % endOfArtistIndex; Y_result_pu [row * endOfArtistIndex + col] += Y_P[row * endOfArtistIndex + col] + I1[row * endOfArtistIndex + col]; tid += blockDim.x * gridDim.x; } bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfArtistIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = Y_T[row * features + tid%SPLIT] * 
user_confidence[col + features * tid%SPLIT]; __syncthreads(); atomicAdd(&Y_temp_2[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfArtistIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = Y_temp_2[row * features + tid%SPLIT] * Y[col + endOfArtistIndex * tid%SPLIT]; __syncthreads(); atomicAdd(&Y_result_pu[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } } } tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < endOfArtistIndex) { for(int k = 0; k < NUM_FEATURES; k++) { artist_row[tid * NUM_FEATURES + k] = Y[tid*NUM_FEATURES + k]; if(artist_row[tid * NUM_FEATURES + k] != 0) { artist_pref[tid*NUM_FEATURES + k] = 1; } else { artist_pref[tid*NUM_FEATURES + k] = artist_row[tid*NUM_FEATURES + k]; } artist_confidence_I[tid*NUM_FEATURES * k * NUM_FEATURES + k] = artist_row[tid * NUM_FEATURES + k]; artist_confidence[tid*NUM_FEATURES * k * NUM_FEATURES + k] = artist_row[tid * NUM_FEATURES + k] + 1; } tid+= blockDim.x * gridDim.x; } for(int i = 0; i < iterations; i++) { for(int j = 0; j < endOfUserIndex; j++) { int res; int bid = blockIdx.x; int tid = threadIdx.x; while(bid * SPLIT < endOfUserIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = X_T[row * features + tid%SPLIT] * artist_confidence_I[col + features * tid%SPLIT]; __syncthreads(); atomicAdd(&X_temp[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfUserIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = X_temp[row * features + tid%SPLIT] * X[col + endOfUserIndex * tid%SPLIT]; __syncthreads(); atomicAdd(&X_result_x[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } tid = threadIdx.x; while(tid < endOfUserIndex*endOfUserIndex) { int row = tid / endOfUserIndex; int col = tid % endOfUserIndex; Y_result_y [row * endOfUserIndex + col] += Y_P[row * endOfUserIndex + col] + I1[row * endOfUserIndex + col]; tid += blockDim.x * gridDim.x; } bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfUserIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = X_T[row * features + tid%SPLIT] * artist_confidence[col + features * tid%SPLIT]; __syncthreads(); atomicAdd(&X_temp_2[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfUserIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = X_temp_2[row * features + tid%SPLIT] * X[col + endOfUserIndex * tid%SPLIT]; __syncthreads(); atomicAdd(&X_result_pi[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } } tid = threadIdx.x; while(tid < endOfUserIndex*features) { int row = tid / features; int col = tid % features; X [row * features + col] = Y_result_y[row * features + col] / Y_result_pu[row * features + col]; tid += blockDim.x * gridDim.x; } tid = threadIdx.x; while(tid < endOfArtistIndex*features) { int row = tid / features; int col = tid % features; Y [row * features + col] = X_result_x[row * features + col] / X_result_pi[row * features + col]; tid += blockDim.x * gridDim.x; } } } void recommend(int user_id, int num_items, int * answer) { int *user_recs, *rec_vector, *X_rec; user_recs = (int *)malloc(sizeof(int) * endOfArtistIndex); rec_vector = (int *)malloc(sizeof(int) * endOfArtistIndex); X_rec = (int *)malloc(sizeof(int) * NUM_FEATURES); int maxVal = 0, index = 0, no = 0; 
for(int i = 0; i < endOfArtistIndex; i++) { user_recs[i] = dataMatrix[user_id*SPARSE_SIZE + i]; if(user_recs[i] == 0) { user_recs[i] = 1; } else { user_recs[i] = 0; } } for(int i = 0; i < NUM_FEATURES; i++) { X_rec[i] = X[user_id * NUM_FEATURES + i]; } //*********GPU*********// //mat_vec_multiply(Y_T, X_rec, rec_vector, NUM_FEATURES, endOfArtistIndex); /*int *mat_d, *vec_d, *res_vec_d; hipMalloc((void **)&mat_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES); hipMalloc((void **)&vec_d, sizeof(int) * endOfArtistIndex); hipMalloc((void **)&res_vec_d, sizeof(int) * endOfArtistIndex); hipMemcpy(mat_d, Y_T, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyHostToDevice); hipMemcpy(vec_d, X_rec, sizeof(int) * endOfArtistIndex, hipMemcpyHostToDevice); gpu_mat_vec_multiply_shared<<<256, 256>>>(mat_d, vec_d, res_vec_d, NUM_FEATURES, endOfArtistIndex); hipMemcpy(rec_vector, res_vec_d, sizeof(int) * endOfArtistIndex, hipMemcpyDeviceToHost);*/ //*********GPU*********// for(int i = 0; i < num_items; i++) { maxVal = INT_MIN, index = 0; for(int j = 0; j < endOfArtistIndex; j++) { no = 0; if(rec_vector[j] > maxVal) { for(int k = 0; k < i; k++) { if(j == answer[k]) { no = 1; } } if(no == 0) { maxVal = rec_vector[j]; index = j; } } } answer[i] = index; } } int implicit_als_2(int alpha_val, int iterations, double lambda_val, int features) { size_t available, total; X = (int *)malloc(sizeof(int) * endOfUserIndex * NUM_FEATURES); Y = (int *)malloc(sizeof(int) * endOfArtistIndex * NUM_FEATURES); for(int i = 0; i < endOfUserIndex; i++) { for(int j = 0; j < features; j++) { X[i * features + j] = rand() % RAND_RANGE; } } for(int i = 0; i < endOfArtistIndex; i++) { for(int j = 0; j < features; j++) { Y[i * features + j] = rand() % RAND_RANGE; } } int *X_P, *Y_P, *data, *X_d, *Y_d; int *X_temp, *Y_temp, *Y_result_y, *Y_result_pu, *Y_temp_2, *X_result_x, *X_result_pi, *X_temp_2; int *X_I, *Y_I, *I, *I1, *user_row, *artist_row, *user_pref, *artist_pref, *user_confidence, *artist_confidence, *user_confidence_I, *artist_confidence_I; data = (int *)malloc(sizeof(int) * SPARSE_SIZE * SPARSE_SIZE); hipMemGetInfo(&available, &total); printf("%u %u\n", available, total); H_ERR(hipMalloc((void **)&data, sizeof(int) * SPARSE_SIZE * SPARSE_SIZE)); H_ERR(hipMalloc((void **)&X_d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&Y_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&X_T, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&Y_T, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&X_P, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(hipMalloc((void **)&Y_P, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(hipMalloc((void **)&X_I, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(hipMalloc((void **)&Y_I, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(hipMalloc((void **)&I, sizeof(int) * features * features)); H_ERR(hipMalloc((void **)&I1, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(hipMalloc((void **)&user_row, sizeof(int) * endOfArtistIndex * features)); H_ERR(hipMalloc((void **)&artist_row, sizeof(int) * endOfUserIndex * features)); H_ERR(hipMalloc((void **)&user_pref, sizeof(int) * endOfArtistIndex * features)); H_ERR(hipMalloc((void **)&artist_pref, sizeof(int) * endOfUserIndex * features)); H_ERR(hipMalloc((void **)&user_confidence, sizeof(int) * endOfUserIndex * features)); H_ERR(hipMalloc((void **)&artist_confidence, sizeof(int) * endOfArtistIndex * features)); 
H_ERR(hipMalloc((void **)&user_confidence_I, sizeof(int) * endOfUserIndex * features)); H_ERR(hipMalloc((void **)&artist_confidence_I, sizeof(int) * endOfArtistIndex * features)); H_ERR(hipMalloc((void **)&X_temp, sizeof(int) * endOfUserIndex * features)); H_ERR(hipMalloc((void **)&X_temp_2, sizeof(int) * endOfUserIndex * features)); H_ERR(hipMalloc((void **)&X_result_x, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(hipMalloc((void **)&X_result_pi, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(hipMalloc((void **)&Y_temp, sizeof(int) * endOfArtistIndex * features)); H_ERR(hipMalloc((void **)&Y_temp_2, sizeof(int) * endOfArtistIndex * features)); H_ERR(hipMalloc((void **)&Y_result_y, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(hipMalloc((void **)&Y_result_pu, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(hipMemcpy(data, dataMatrix, sizeof(int) * SPARSE_SIZE * SPARSE_SIZE, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(X_d, X, sizeof(int) * endOfUserIndex * features, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(Y_d, Y, sizeof(int) * endOfArtistIndex * features, hipMemcpyHostToDevice)); hipMemGetInfo(&available, &total); printf("%u %u\n", available, total); hipLaunchKernelGGL(( implicit_als_gpu), dim3(256), dim3(256), 0, 0, data, X_d, Y_d, X_T, Y_T, X_P, Y_P, X_I, Y_I, I, I1, user_row, artist_row, user_pref, artist_pref, user_confidence, artist_confidence, user_confidence_I, artist_confidence_I, X_temp, Y_temp, Y_result_y, Y_result_pu, Y_temp_2, X_result_x, X_result_pi, X_temp_2, alpha_val, iterations, lambda_val, features, endOfArtistIndex, endOfUserIndex); H_ERR(hipMemcpy(X, X_d, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyDeviceToHost)); H_ERR(hipMemcpy(Y, Y_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyDeviceToHost)); return; } int implicit_als(int alpha_val, int iterations, double lambda_val, int features) { size_t available, total; hipMemGetInfo(&available, &total); //printf("%u %u\n", available, total); double time_beg = wtime(); //GPU alpha mult// for(int i = 0; i < endOfArtistIndex; i++) { for(int j = 0; j < endOfUserIndex; j++) { dataMatrix[i * SPARSE_SIZE + j] = dataMatrix[i * SPARSE_SIZE + j] * alpha_val; } } //NEWGPU// int *X_P, *Y_P; X = (int *)malloc(sizeof(int) * endOfUserIndex * NUM_FEATURES); Y = (int *)malloc(sizeof(int) * endOfArtistIndex * NUM_FEATURES); X_T = (int *)malloc(sizeof(int) * endOfUserIndex * NUM_FEATURES); Y_T = (int *)malloc(sizeof(int) * endOfArtistIndex * NUM_FEATURES); X_P = (int *)malloc(sizeof(int) * endOfUserIndex * endOfUserIndex); Y_P = (int *)malloc(sizeof(int) * endOfArtistIndex * endOfArtistIndex); //GPU random// for(int i = 0; i < endOfUserIndex; i++) { for(int j = 0; j < features; j++) { X[i * features + j] = rand() % RAND_RANGE; } } //NEWGPU// //GPU transpose// /*for(int i = 0; i < endOfUserIndex; i++) { for(int j = 0; j < features; j++) { X_T[j * endOfUserIndex + i] = X[i*features + j]; } }*/ int *m1, *t1; H_ERR(hipMalloc((void **)&m1, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&t1, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMemcpy(m1, X, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_matrix_transpose), dim3(256), dim3(256), 0, 0, m1, t1, endOfUserIndex, NUM_FEATURES); H_ERR(hipMemcpy(X_T, t1, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyDeviceToHost)); H_ERR(hipFree(m1)); H_ERR(hipFree(t1)); //NEWGPU// //GPU random// for(int i = 0; i < endOfArtistIndex; i++) { for(int j = 0; j < features; 
j++) { Y[i * features + j] = rand() % RAND_RANGE; } } //GPU transpose /*for(int i = 0; i < endOfArtistIndex; i++) { for(int j = 0; j < features; j++) { Y_T[j * endOfArtistIndex + i] = Y[i * features + j]; } }*/ int *m2, *t2; H_ERR(hipMalloc((void **)&m2, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&t2, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMemcpy(m2, Y, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_matrix_transpose), dim3(256), dim3(256), 0, 0, m2, t2, endOfUserIndex, NUM_FEATURES); H_ERR(hipMemcpy(Y_T, t2, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyDeviceToHost)); H_ERR(hipFree(m2)); H_ERR(hipFree(t2)); double elapsed_time = wtime(); elapsed_time -= time_beg; //printf("setup elapsed time is: %f\n", elapsed_time); time_beg = wtime(); int *X_I, *Y_I, *I, *I1, *user_row, *artist_row, *user_pref, *artist_pref, *user_confidence, *artist_confidence, *user_confidence_I, *artist_confidence_I; X_I = (int *)malloc(sizeof(int) * endOfUserIndex * endOfUserIndex); Y_I = (int *)malloc(sizeof(int) * endOfArtistIndex * endOfArtistIndex); I = (int *)malloc(sizeof(int) * features * features); I1 = (int *)malloc(sizeof(int) * endOfArtistIndex * endOfArtistIndex); user_row = (int *)malloc(sizeof(int) * endOfArtistIndex * features); artist_row = (int *)malloc(sizeof(int) * endOfUserIndex * features); user_pref = (int *)malloc(sizeof(int) * endOfArtistIndex * features); artist_pref = (int *)malloc(sizeof(int) * endOfUserIndex * features); user_confidence = (int *)malloc(sizeof(int) * endOfUserIndex * features * features); artist_confidence = (int *)malloc(sizeof(int) * endOfArtistIndex * features * features); user_confidence_I = (int *)malloc(sizeof(int) * endOfUserIndex * features * features); artist_confidence_I = (int *)malloc(sizeof(int) * endOfArtistIndex * features * features); int *X_temp, *Y_temp, *Y_result_y, *Y_result_pu, *Y_temp_2, *X_result_x, *X_result_pi, *X_temp_2; X_temp = (int *)malloc(sizeof(int) * endOfUserIndex * features); X_temp_2 = (int *)malloc(sizeof(int) * endOfUserIndex * features); X_result_x = (int *)malloc(sizeof(int) * endOfUserIndex * endOfUserIndex); X_result_pi = (int *)malloc(sizeof(int) * endOfUserIndex * endOfUserIndex); Y_temp = (int *)malloc(sizeof(int) * endOfArtistIndex * features); Y_temp_2 = (int *)malloc(sizeof(int) * endOfArtistIndex * features); Y_result_y = (int *)malloc(sizeof(int) * endOfArtistIndex * endOfArtistIndex); Y_result_pu = (int *)malloc(sizeof(int) * endOfArtistIndex * endOfArtistIndex); //GPU???/// for(int i = 0; i < endOfUserIndex; i++) { X_I[i * endOfUserIndex + i] = 1; } for(int i = 0; i < endOfArtistIndex; i++) { Y_I[i * endOfArtistIndex + i] = 1; } for(int i = 0; i < features; i++) { I[i * features + i] = 1; } for(int i = 0; i < endOfArtistIndex; i++) { I1[i * endOfArtistIndex + i] = lambda_val; } //*********GPU*********// //mat_mat_multiply(X, X_T, X_P, endOfUserIndex, features, endOfUserIndex); int *mat1_d, *mat2_d, *res_d; H_ERR(hipMalloc((void **)&mat1_d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&mat2_d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&res_d, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(hipMemcpy(mat1_d, X, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(mat2_d, X_T, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_mat_mat_multiply_atomic), dim3(256), dim3(256), 0, 0, 
mat1_d, mat2_d, res_d, endOfUserIndex, NUM_FEATURES, endOfUserIndex); H_ERR(hipMemcpy(X_P, res_d, sizeof(int) * endOfUserIndex * endOfUserIndex, hipMemcpyDeviceToHost)); H_ERR(hipFree(mat1_d)); H_ERR(hipFree(mat2_d)); H_ERR(hipFree(res_d)); //mat_mat_multiply(Y, Y_T, Y_P, endOfArtistIndex, features, endOfArtistIndex);// int *mat1_2d, *mat2_2d, *res_2d; H_ERR(hipMalloc((void **)&mat1_2d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&mat2_2d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&res_2d, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(hipMemcpy(mat1_2d, Y, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(mat2_2d, Y_T, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_mat_mat_multiply_atomic), dim3(256), dim3(256), 0, 0, mat1_2d, mat2_2d, res_2d, endOfArtistIndex, NUM_FEATURES, endOfArtistIndex); hipMemGetInfo(&available, &total); //printf("%u %u\n", available, total); H_ERR(hipMemcpy(Y_P, res_2d, sizeof(int) * endOfArtistIndex * endOfArtistIndex, hipMemcpyDeviceToHost)); H_ERR(hipFree(mat1_2d)); H_ERR(hipFree(mat2_2d)); H_ERR(hipFree(res_2d)); //*********GPU*********// elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 1 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); int *x_d, *user_row_d, *user_pref_d, *conf_I_d, *conf_d; H_ERR(hipMalloc((void **)&x_d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&user_row_d, sizeof(int) * endOfArtistIndex * features)); H_ERR(hipMalloc((void **)&user_pref_d, sizeof(int) * endOfArtistIndex * features)); H_ERR(hipMalloc((void **)&conf_I_d, sizeof(int) * endOfUserIndex * NUM_FEATURES * NUM_FEATURES)); H_ERR(hipMalloc((void **)&conf_d, sizeof(int) * endOfUserIndex * NUM_FEATURES * NUM_FEATURES)); H_ERR(hipMemcpy(x_d, X, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_als), dim3(256), dim3(256), 0, 0, x_d, user_row_d, user_pref_d, conf_I_d, conf_d, endOfUserIndex); H_ERR(hipMemcpy(user_row, user_row_d, sizeof(int) * endOfArtistIndex * features, hipMemcpyDeviceToHost)); H_ERR(hipMemcpy(user_pref, user_pref_d, sizeof(int) * endOfArtistIndex * features, hipMemcpyDeviceToHost)); H_ERR(hipMemcpy(user_confidence_I, conf_I_d, sizeof(int) * endOfUserIndex * NUM_FEATURES * NUM_FEATURES, hipMemcpyDeviceToHost)); H_ERR(hipMemcpy(user_confidence, conf_d, sizeof(int) * endOfUserIndex * NUM_FEATURES * NUM_FEATURES, hipMemcpyDeviceToHost)); H_ERR(hipFree(x_d)); H_ERR(hipFree(user_row_d)); H_ERR(hipFree(user_pref_d)); H_ERR(hipFree(conf_I_d)); H_ERR(hipFree(conf_d)); int *mat1_3d, *mat2_3d, *res_3d; H_ERR(hipMalloc((void **)&mat1_3d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&mat2_3d, sizeof(int) * NUM_FEATURES * NUM_FEATURES)); H_ERR(hipMalloc((void **)&res_3d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); int *mat1_4d, *mat2_4d, *res_4d; H_ERR(hipMalloc((void **)&mat1_4d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&mat2_4d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&res_4d, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); int *mat1_add, *mat2_add, *res_add; H_ERR(hipMalloc((void **)&mat1_add, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(hipMalloc((void **)&mat2_add, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(hipMalloc((void **)&res_add, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); 
int *mat1_5d, *mat2_5d, *res_5d; H_ERR(hipMalloc((void **)&mat1_5d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&mat2_5d, sizeof(int) * NUM_FEATURES * NUM_FEATURES)); H_ERR(hipMalloc((void **)&res_5d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); int *mat1_6d, *mat2_6d, *res_6d; H_ERR(hipMalloc((void **)&mat1_6d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&mat2_6d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&res_6d, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(hipMemcpy(mat1_3d, Y_T, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(mat2_4d, Y, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(mat1_5d, Y_T, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(mat2_6d, Y, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(mat1_add, Y_P, sizeof(int) * endOfArtistIndex * endOfArtistIndex, hipMemcpyHostToDevice)); for(int i = 0; i < iterations; i++) { for(int j = 0; j < endOfUserIndex; j++) { /*for(int k = 0; k < features; k++) { user_row[k] = X[j*features + k]; if(user_row[k] != 0) { user_pref[k] = 1; } else { user_pref[k] = user_row[k]; } } for(int k = 0; k < features; k++) { user_confidence_I[k * features + k] = user_row[k]; user_confidence[k * features + k] = user_row[k] + 1; }*/ elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 2 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); //*********GPU*********// //mat_mat_multiply(Y_T, user_confidence_I, Y_temp, endOfArtistIndex, features, features);// H_ERR(hipMemcpy(mat2_3d, user_confidence_I + j * NUM_FEATURES, sizeof(int) * NUM_FEATURES * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_mat_mat_multiply_atomic), dim3(256), dim3(256), 0, 0, mat1_3d, mat2_3d, res_3d, endOfArtistIndex, NUM_FEATURES, NUM_FEATURES); H_ERR(hipMemcpy(Y_temp, res_3d, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyDeviceToHost)); //mat_mat_multiply(Y_temp, Y, Y_result_y, endOfArtistIndex, features, endOfArtistIndex);// elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 3 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); H_ERR(hipMemcpy(mat1_4d, Y_temp, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_mat_mat_multiply_atomic), dim3(256), dim3(256), 0, 0, mat1_4d, mat2_4d, res_4d, endOfArtistIndex, NUM_FEATURES, endOfArtistIndex); H_ERR(hipMemcpy(Y_result_y, res_4d, sizeof(int) * endOfArtistIndex * endOfArtistIndex, hipMemcpyDeviceToHost)); //*********GPU*********// /*for(int j = 0; j < endOfArtistIndex; j++) { for(int k = 0; k < endOfArtistIndex; k++) { Y_result_y[j*endOfArtistIndex + k] += Y_P[j*endOfArtistIndex + k] + I1[j*endOfArtistIndex + k]; } }*/ H_ERR(hipMemcpy(mat2_add, I1, sizeof(int) * endOfArtistIndex * endOfArtistIndex, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(res_add, Y_result_y, sizeof(int) * endOfArtistIndex * endOfArtistIndex, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_matrix_addition), dim3(256), dim3(256), 0, 0, mat1_add, mat2_add, res_add, endOfArtistIndex, endOfArtistIndex); H_ERR(hipMemcpy(Y_result_pu, res_add, sizeof(int) * endOfArtistIndex * endOfArtistIndex, hipMemcpyDeviceToHost)); //*********GPU*********// elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 4 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); //mat_mat_multiply(Y_T, 
user_confidence, Y_temp_2, endOfArtistIndex, features, features);// H_ERR(hipMemcpy(mat2_5d, user_confidence + j * NUM_FEATURES, sizeof(int) * NUM_FEATURES * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_mat_mat_multiply_atomic), dim3(256), dim3(256), 0, 0, mat1_5d, mat2_5d, res_5d, endOfArtistIndex, NUM_FEATURES, NUM_FEATURES); H_ERR(hipMemcpy(Y_temp_2, res_5d, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyDeviceToHost)); //mat_mat_multiply(Y_temp_2, Y, Y_result_pu, endOfArtistIndex, features, endOfArtistIndex);// elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 6 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); H_ERR(hipMemcpy(mat1_6d, Y_temp_2, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_mat_mat_multiply_atomic), dim3(256), dim3(256), 0, 0, mat1_6d, mat2_6d, res_6d, endOfArtistIndex, NUM_FEATURES, endOfArtistIndex); H_ERR(hipMemcpy(Y_result_pu, res_6d, sizeof(int) * endOfArtistIndex * endOfArtistIndex, hipMemcpyDeviceToHost)); //*********GPU*********// /*for(int k = 0; k < features; k++) { X[i*features + k] = Y_result_y[i*features + k] / Y_result_pu[i*features + k]; }*/ } H_ERR(hipFree(mat1_4d)); H_ERR(hipFree(mat2_4d)); H_ERR(hipFree(res_4d)); H_ERR(hipFree(mat1_3d)); H_ERR(hipFree(mat2_3d)); H_ERR(hipFree(res_3d)); H_ERR(hipFree(mat1_add)); H_ERR(hipFree(mat2_add)); H_ERR(hipFree(res_add)); H_ERR(hipFree(mat1_5d)); H_ERR(hipFree(mat2_5d)); H_ERR(hipFree(res_5d)); H_ERR(hipFree(mat1_6d)); H_ERR(hipFree(mat2_6d)); H_ERR(hipFree(res_6d)); int *y_d, *artist_row_d, *artist_pref_d, *art_conf_I_d, *art_conf_d; H_ERR(hipMalloc((void **)&y_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&artist_row_d, sizeof(int) * endOfUserIndex * features)); H_ERR(hipMalloc((void **)&artist_pref_d, sizeof(int) * endOfUserIndex * features)); H_ERR(hipMalloc((void **)&art_conf_I_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES * NUM_FEATURES)); H_ERR(hipMalloc((void **)&art_conf_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES * NUM_FEATURES)); H_ERR(hipMemcpy(y_d, Y, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_als), dim3(256), dim3(256), 0, 0, y_d, artist_row_d, artist_pref_d, art_conf_I_d, art_conf_d, endOfArtistIndex); H_ERR(hipMemcpy(artist_row, artist_row_d, sizeof(int) * endOfUserIndex * features, hipMemcpyDeviceToHost)); H_ERR(hipMemcpy(artist_pref, artist_pref_d, sizeof(int) * endOfUserIndex * features, hipMemcpyDeviceToHost)); H_ERR(hipMemcpy(artist_confidence_I, art_conf_I_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES * NUM_FEATURES, hipMemcpyDeviceToHost)); H_ERR(hipMemcpy(artist_confidence, art_conf_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES * NUM_FEATURES, hipMemcpyDeviceToHost)); H_ERR(hipFree(y_d)); H_ERR(hipFree(artist_row_d)); H_ERR(hipFree(artist_pref_d)); H_ERR(hipFree(art_conf_I_d)); H_ERR(hipFree(art_conf_d)); int *mat1_7d, *mat2_7d, *res_7d; H_ERR(hipMalloc((void **)&mat1_7d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&mat2_7d, sizeof(int) * NUM_FEATURES * NUM_FEATURES)); H_ERR(hipMalloc((void **)&res_7d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); int *mat1_8d, *mat2_8d, *res_8d; H_ERR(hipMalloc((void **)&mat1_8d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&mat2_8d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&res_8d, sizeof(int) * endOfUserIndex * endOfUserIndex)); int *mat1_add2, *mat2_add2, *res_add2; 
H_ERR(hipMalloc((void **)&mat1_add2, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(hipMalloc((void **)&mat2_add2, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(hipMalloc((void **)&res_add2, sizeof(int) * endOfUserIndex * endOfUserIndex)); int *mat1_9d, *mat2_9d, *res_9d; H_ERR(hipMalloc((void **)&mat1_9d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&mat2_9d, sizeof(int) * NUM_FEATURES * NUM_FEATURES)); H_ERR(hipMalloc((void **)&res_9d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); int *mat1_10d, *mat2_10d, *res_10d; H_ERR(hipMalloc((void **)&mat1_10d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&mat2_10d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&res_10d, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(hipMemcpy(mat1_7d, X_T, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(mat2_8d, X, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(mat1_9d, X_T, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(mat2_10d, X, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(mat1_add2, Y_P, sizeof(int) * endOfUserIndex * endOfUserIndex, hipMemcpyHostToDevice)); for(int j = 0; j < endOfArtistIndex; j++) { /*for(int k = 0; k < features; k++) { artist_row[k] = Y[j*features + k]; if(artist_row[k] != 0) { artist_pref[k] = 1; } else { artist_pref[k] = artist_row[k]; } } for(int k = 0; k < features; k++) { artist_confidence_I[k * features + k] = artist_row[k]; artist_confidence[k * features + k] = artist_row[k] + 1; }*/ //*********GPU*********// elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 7 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); //mat_mat_multiply(X_T, artist_confidence_I, X_temp, endOfUserIndex, features, features);// H_ERR(hipMemcpy(mat2_7d, artist_confidence_I + j * NUM_FEATURES, sizeof(int) * NUM_FEATURES * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_mat_mat_multiply_atomic), dim3(256), dim3(256), 0, 0, mat1_7d, mat2_7d, res_7d, endOfUserIndex, NUM_FEATURES, NUM_FEATURES); H_ERR(hipMemcpy(X_temp, res_7d, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyDeviceToHost)); elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 8 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); //mat_mat_multiply(X_temp, X, X_result_x, endOfUserIndex, features, endOfUserIndex);// H_ERR(hipMemcpy(mat1_8d, X_temp, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_mat_mat_multiply_atomic), dim3(256), dim3(256), 0, 0, mat1_8d, mat2_8d, res_8d, endOfUserIndex, NUM_FEATURES, endOfUserIndex); H_ERR(hipMemcpy(X_result_x, res_8d, sizeof(int) * endOfUserIndex * endOfUserIndex, hipMemcpyDeviceToHost)); //*********GPU*********// /*for(int j = 0; j < endOfUserIndex; j++) { for(int k = 0; k < endOfUserIndex; k++) { Y_result_y[j*endOfUserIndex + k] += Y_P[j*endOfUserIndex + k] + I1[j*endOfUserIndex + k]; } }*/ H_ERR(hipMemcpy(mat2_add2, I1, sizeof(int) * endOfUserIndex * endOfUserIndex, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(res_add2, Y_result_y, sizeof(int) * endOfUserIndex * endOfUserIndex, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_matrix_addition), dim3(256), dim3(256), 0, 0, mat1_add2, mat2_add2, res_add2, endOfUserIndex, endOfUserIndex); H_ERR(hipMemcpy(Y_result_y, res_add2, sizeof(int) * endOfUserIndex * endOfUserIndex, hipMemcpyDeviceToHost)); 
//*********GPU*********// //mat_mat_multiply(X_T, artist_confidence, X_temp_2, endOfUserIndex, features, features);// elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 9 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); H_ERR(hipMemcpy(mat2_9d, artist_confidence + j * NUM_FEATURES, sizeof(int) * NUM_FEATURES * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_mat_mat_multiply_atomic), dim3(256), dim3(256), 0, 0, mat1_9d, mat2_9d, res_9d, endOfUserIndex, NUM_FEATURES, NUM_FEATURES); H_ERR(hipMemcpy(X_temp_2, res_9d, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyDeviceToHost)); elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 10 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); //mat_mat_multiply(X_temp_2, X, X_result_pi, endOfUserIndex, features, endOfUserIndex);// H_ERR(hipMemcpy(mat1_10d, X_temp_2, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_mat_mat_multiply_atomic), dim3(256), dim3(256), 0, 0, mat1_10d, mat2_10d, res_10d, endOfUserIndex, NUM_FEATURES, endOfUserIndex); H_ERR(hipMemcpy(X_result_pi, res_10d, sizeof(int) * endOfUserIndex * endOfUserIndex, hipMemcpyDeviceToHost)); //*********GPU*********// /*for(int k = 0; k < features; k++) { Y[i*features + k] = X_result_x[i*features + k] / X_result_pi[i*features + k]; }*/ } H_ERR(hipFree(mat1_7d)); H_ERR(hipFree(mat2_7d)); H_ERR(hipFree(res_7d)); H_ERR(hipFree(mat1_8d)); H_ERR(hipFree(mat2_8d)); H_ERR(hipFree(res_8d)); H_ERR(hipFree(mat1_add2)); H_ERR(hipFree(mat2_add2)); H_ERR(hipFree(res_add2)); H_ERR(hipFree(mat1_9d)); H_ERR(hipFree(mat2_9d)); H_ERR(hipFree(res_9d)); H_ERR(hipFree(mat1_10d)); H_ERR(hipFree(mat2_10d)); H_ERR(hipFree(res_10d)); int *mat1_div, *mat2_div, *res_div; H_ERR(hipMalloc((void **)&mat1_div, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&mat2_div, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&res_div, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(hipMemcpy(mat1_div, Y_result_y, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(mat2_div, Y_result_pu, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_mat_div), dim3(256), dim3(256), 0, 0, mat1_div, mat2_div, res_div, endOfUserIndex, NUM_FEATURES); H_ERR(hipMemcpy(X, res_div, sizeof(int) * endOfUserIndex * NUM_FEATURES, hipMemcpyDeviceToHost)); H_ERR(hipFree(mat1_div)); H_ERR(hipFree(mat2_div)); H_ERR(hipFree(res_div)); int *mat1_div2, *mat2_div2, *res_div2; H_ERR(hipMalloc((void **)&mat1_div2, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&mat2_div2, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(hipMalloc((void **)&res_div2, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(hipMemcpy(mat1_div2, X_result_x, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyHostToDevice)); H_ERR(hipMemcpy(mat2_div2, X_result_pi, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_mat_div), dim3(256), dim3(256), 0, 0, mat1_div2, mat2_div2, res_div2, endOfArtistIndex, NUM_FEATURES); H_ERR(hipMemcpy(Y, res_div2, sizeof(int) * endOfArtistIndex * NUM_FEATURES, hipMemcpyDeviceToHost)); H_ERR(hipFree(mat1_div2)); H_ERR(hipFree(mat2_div2)); H_ERR(hipFree(res_div2)); } elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 11 elapsed time is: %f\n", elapsed_time); return; } int main (int args, char **argv) { double start_time = 
wtime(); int newname = 0; dataMatrix = (int *)malloc(sizeof(int) * SPARSE_SIZE * SPARSE_SIZE); users = (char**)malloc(sizeof(char*) * USER_SIZE); for(int i = 0; i < USER_SIZE; i++) { users[i] = (char*)malloc(50 * sizeof(char)); } artists = (char**)malloc(sizeof(char*) * ARTIST_SIZE); for(int i = 0; i < ARTIST_SIZE; i++) { artists[i] = (char*)malloc(50 * sizeof(char)); } artistNames = (char**)malloc(sizeof(char*) * ARTIST_SIZE); for(int i = 0; i < ARTIST_SIZE; i++) { artistNames[i] = (char*)malloc(50 * sizeof(char)); } FILE* data = fopen("usersha1-artmbid-artname-plays.tsv", "r"); //our dataset file (tab separated file) if(data == NULL) { //printf("File read error"); return 0; } //j: 0 (user id), 1 (artist id), 2 (artist name), 3(plays) long i = 0; int j = 0; int currentUserIndex = 0, currentArtistIndex = 0, currentPlayCount = 0; while (1) { char dataLine[LINE_SIZE]; if(i < INPUT_SIZE && fgets(dataLine, sizeof(dataLine), data) != NULL)//reading in entire line using fgets and putting it in dataLine { char * token = strtok(dataLine, "\t"); //parsing the data with the tab separater j = 0; while(j < 4) { if(token == NULL) { break; } if(j == 0)//user id, check if its in the user list: if not, add to list, if it is, save the index { currentUserIndex = checkIfUserExistsInData(token); if(currentUserIndex == -1) //must add to users { currentUserIndex = endOfUserIndex; strcpy(users[endOfUserIndex++], token); } } else if (j == 1) //artist id, check if its in the artist list: if not, add to list, if it is, save the index { newname = 0; currentArtistIndex = checkIfArtistExistsInData(token); if(currentArtistIndex == -1) //must add to artists { currentArtistIndex = endOfArtistIndex; strcpy(artists[endOfArtistIndex], token); newname = 1; } } else if(j == 2)//artist name { if(newname == 1) strcpy(artistNames[endOfArtistIndex++], token); } else if(j == 3) //plays, use the indexes to see where they should go in the data (sparse matrix) { currentPlayCount = atoi(token); //convert to integer and place in sparse matrix dataMatrix[currentUserIndex * SPARSE_SIZE + currentArtistIndex] = currentPlayCount; } token = strtok(NULL, "\t"); //reading the next value of the parsed data j++; } i++; } else { break; } } int *ans; ans = (int *)malloc(sizeof(int) * NUM_RECOMMENDATIONS); double time_beg = wtime(); implicit_als_2(40, ITERATIONS, 0.1, 10); double elapsed_time = wtime(); elapsed_time -= time_beg; printf("implicit elapsed time is: %f\n", elapsed_time); time_beg = wtime(); recommend(USER_ID, NUM_RECOMMENDATIONS, ans); elapsed_time = wtime(); elapsed_time -= time_beg; printf("recommend elapsed time is: %f\n", elapsed_time); printf("User %d Recommendations: \n", USER_ID); for(int i = 0; i < NUM_RECOMMENDATIONS; i++) { printf("%s\n", artistNames[ans[i]]); } elapsed_time = wtime(); elapsed_time -= start_time; printf("total elapsed time is: %f\n", elapsed_time); return 0; }
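// Build sketch for this HIP translation (file name and flags are assumptions, not taken
// from the project; any ../helper sources defining wtime()/H_ERR must also be available):
//   hipcc -O2 <this_file>.cpp -o cf_hip
// The program expects usersha1-artmbid-artname-plays.tsv in the working directory and
// prints the top NUM_RECOMMENDATIONS artists for USER_ID.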
2108d2ad11009a3bebdc0ddf64761bbe5d6ba528.cu
/* Lucas Tata / Salvatore Amico High Performance Computing on GPUs Final Project Collaborative Filtering GPU Implementation */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <math.h> #include <limits.h> #include "../helper/util.h" #include "../helper/wtime.h" #define INPUT_SIZE 16000 //how many lines to read from the dataset #define SPARSE_SIZE 8000 //size of sparse matrix is sparse_size * sparse_size #define USER_SIZE 2048 #define ARTIST_SIZE 8192 #define LINE_SIZE 1024 #define RAND_RANGE 100 //sets the random number generation range #define NUM_RECOMMENDATIONS 5 //number of recommendations to generate for the user #define NUM_FEATURES 10 //number of features to generate for each user in the algorithm #define ITERATIONS 1 //how many iterations you want to run the algorithm with #define USER_ID 1 //indicates which user you want to generate recommendations for #define SHARED_SIZE 100 #define SPLIT 25 int *dataMatrix, *X, *Y, *X_T, *Y_T; //our output sparse matrix (users by artists, data is the play count) char **artists; char **users; char **artistNames; int endOfArtistIndex = 0; //keep tabs on how many artists are currently in there int endOfUserIndex = 0; //keep tabs on how many users are currently in there __global__ void gpu_als(int *x, int *user_row, int *user_pref, int * conf_I, int *conf, int num) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < num) { for(int k = 0; k < NUM_FEATURES; k++) { user_row[tid * NUM_FEATURES + k] = x[tid*NUM_FEATURES + k]; if(user_row[tid * NUM_FEATURES + k] != 0) { user_pref[tid*NUM_FEATURES + k] = 1; } else { user_pref[tid*NUM_FEATURES + k] = user_row[tid*NUM_FEATURES + k]; } conf_I[tid*NUM_FEATURES * k * NUM_FEATURES + k] = user_row[tid * NUM_FEATURES + k]; conf[tid*NUM_FEATURES * k * NUM_FEATURES + k] = user_row[tid * NUM_FEATURES + k] + 1; } tid+= blockDim.x * gridDim.x; } } __global__ void gpu_mat_mat_multiply(int *a, int *b, int *c, int num_rows, int num_cols, int num_rows2) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < num_rows*num_rows2) { int res = 0; int row = tid / num_rows; int col = tid % num_rows; for(int j = 0; j < num_cols; j++) { res += a[row * num_cols + j] * b[col + num_rows2 * j]; } c[tid] = res; tid+= blockDim.x * gridDim.x; } } __global__ void gpu_mat_mat_multiply_shared(int *a, int *b, int *c, int num_rows, int num_cols, int num_rows2) { __shared__ int sres[NUM_FEATURES]; int bid = blockIdx.x; while(bid < num_rows*num_rows2) { int row = bid / num_cols; int col = bid % num_cols; int tid = threadIdx.x; while(tid < num_cols) { sres[tid] = a[row * num_cols + tid] * b[col + num_rows2 * tid]; tid += blockDim.x; } __syncthreads(); for(int i = num_cols/2 ; i > 0; i /= 2) { if(threadIdx.x < i) { int temp = sres[threadIdx.x] + sres[threadIdx.x + i]; sres[threadIdx.x] = temp; } __syncthreads(); } if(threadIdx.x == 0) c[bid] = sres[threadIdx.x]; bid += gridDim.x; __syncthreads(); } } __global__ void gpu_mat_mat_multiply_atomic(int *a, int *b, int *c, int num_rows, int num_cols, int num_rows2) { int res; int bid = blockIdx.x; int tid = threadIdx.x; while(bid * SPLIT < num_rows*num_cols) { int row = bid / num_cols + tid / SPLIT; int col = bid % num_cols; res = a[row * num_cols + tid%SPLIT] * b[col + num_rows2 * tid%SPLIT]; __syncthreads(); atomicAdd(&c[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } } __global__ void gpu_mat_vec_multiply_shared(int *mat, int *vec, int *res, int num_rows, int num_cols) { __shared__ int svec[256]; __shared__ int sres[256]; 
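// Each block caches the vector in shared memory, multiplies it element-wise against one
// matrix row, then tree-reduces sres[] so thread 0 writes a single dot product per row;
// blocks stride forward by a fixed step (128 here) to cover the remaining rows.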
svec[threadIdx.x] = vec[threadIdx.x]; int bid = blockIdx.x; __syncthreads(); while (bid < num_rows) { sres[threadIdx.x] = mat[bid * num_cols + threadIdx.x] * svec[threadIdx.x]; __syncthreads(); for(int i = blockDim.x/2 ; i > 0; i /= 2) { if(threadIdx.x < i) { int temp = sres[threadIdx.x] + sres[threadIdx.x + i]; sres[threadIdx.x] = temp; } __syncthreads(); } if(threadIdx.x == 0) res[bid] = sres[threadIdx.x]; bid += 128; __syncthreads(); } } __global__ void gpu_matrix_transpose(int *mat, int *res, int num_rows, int num_cols) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < num_rows*num_cols) { int in_row = tid / num_cols; int in_col = tid % num_cols; int out_row = in_col; int out_col = in_row; res [out_row * num_cols + out_col] = mat [in_row * num_cols + in_col]; tid += blockDim.x * gridDim.x; } } __global__ void gpu_matrix_alpha(int *mat, int *res, float alpha_val, int num_rows, int num_cols) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < num_rows*num_cols) { int in_row = tid / num_cols; int in_col = tid % num_cols; int out_row = in_col; int out_col = in_row; res [out_row * num_cols + out_col] = mat [in_row * num_cols + in_col]; tid += blockDim.x * gridDim.x; } } __global__ void gpu_matrix_addition(int *mat1, int *mat2, int *res, int num_rows, int num_cols) { int tid = threadIdx.x; while(tid < num_rows*num_cols) { int row = tid / num_cols; int col = tid % num_cols; res [row * num_cols + col] += mat1[row * num_cols + col] + mat2[row * num_cols + col]; tid += blockDim.x * gridDim.x; } } __global__ void gpu_mat_div(int *mat1, int *mat2, int *res, int num_rows, int num_cols) { int tid = threadIdx.x; while(tid < num_rows*num_cols) { int row = tid / num_cols; int col = tid % num_cols; res [row * num_cols + col] = mat1[row * num_cols + col] / mat2[row * num_cols + col]; tid += blockDim.x * gridDim.x; } } int checkIfArtistExistsInData(char * artist) { int i; for(i = 0; i < ARTIST_SIZE; i++) { if(strcmp(artist, artists[i]) == 0) { return i; } } return -1; } int checkIfUserExistsInData(char * user) { int i; for(i = 0; i < USER_SIZE; i++) { if(strcmp(user, users[i]) == 0) { return i; } } return -1; } void mat_mat_multiply(int *mat1, int *mat2, int *res, int num_rows1, int num_cols, int num_rows2) { for(int k = 0; k < num_rows1; k ++) { for(int i = 0; i < num_rows2; i ++) { int temp_res = 0; for (int j = 0; j < num_cols; j ++) { temp_res += mat1[i * num_cols + j] * mat2[k + num_rows2 * j]; } res[k+i*num_rows1] = temp_res; } } } void mat_vec_multiply(int *mat, int *vec, int *res, int num_rows, int num_cols) { for(int i = 0; i < num_rows; i ++) { int temp_res = 0; for (int j = 0; j < num_cols; j ++) { temp_res += mat[i * num_cols + j] * vec[j]; } res[i] = temp_res; } } __global__ void implicit_als_gpu(int *data, int *X, int *Y, int *X_T, int *Y_T, int *X_P, int *Y_P, int *X_I, int *Y_I, int *I, int *I1, int *user_row, int *artist_row, int *user_pref, int *artist_pref, int *user_confidence, int *artist_confidence, int *user_confidence_I, int *artist_confidence_I, int *X_temp, int *Y_temp, int *Y_result_y, int *Y_result_pu, int *Y_temp_2, int *X_result_x, int *X_result_pi, int *X_temp_2, int alpha_val, int iterations, double lambda_val, int features, int endOfArtistIndex, int endOfUserIndex) { int tid = threadIdx.x; while(tid < endOfArtistIndex*endOfUserIndex) { int i = tid / endOfUserIndex; int j = tid % endOfUserIndex; data[i * SPARSE_SIZE + j] = data[i * SPARSE_SIZE + j] * alpha_val; tid += blockDim.x * gridDim.x; } tid = threadIdx.x; while(tid < endOfUserIndex*features) { 
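// Transpose X into X_T: each thread maps its flat index to an (in_row, in_col) pair and
// writes the element to the swapped (out_row, out_col) position.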
int i = tid / features; int j = tid % features; int in_row = tid / features; int in_col = tid % features; int out_row = in_col; int out_col = in_row; X_T [out_row * features + out_col] = X [in_row * features + in_col]; tid += blockDim.x * gridDim.x; } tid = threadIdx.x; while(tid < endOfArtistIndex*features) { int i = tid / endOfArtistIndex; int j = tid % endOfArtistIndex; int in_row = tid / features; int in_col = tid % features; int out_row = in_col; int out_col = in_row; Y_T [out_row * features + out_col] = Y [in_row * features + in_col]; tid += blockDim.x * gridDim.x; } tid = threadIdx.x; while(tid < endOfUserIndex) { X_I[tid * endOfUserIndex + tid] = 1; tid += blockDim.x * gridDim.x; } tid = threadIdx.x; while(tid < endOfArtistIndex) { Y_I[tid * endOfArtistIndex + tid] = 1; I1[tid * endOfArtistIndex + tid] = lambda_val; tid += blockDim.x * gridDim.x; } tid = threadIdx.x; while(tid < features) { I[tid * features + tid] = 1; tid += blockDim.x * gridDim.x; } tid = threadIdx.x; int res; int bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfUserIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = X[row * features + tid%SPLIT] * X_T[col + endOfUserIndex * tid%SPLIT]; __syncthreads(); atomicAdd(&X_P[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfArtistIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = Y[row * features + tid%SPLIT] * Y_T[col + endOfArtistIndex * tid%SPLIT]; __syncthreads(); atomicAdd(&Y_P[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < endOfUserIndex) { for(int k = 0; k < NUM_FEATURES; k++) { user_row[tid * NUM_FEATURES + k] = X[tid*NUM_FEATURES + k]; if(user_row[tid * NUM_FEATURES + k] != 0) { user_pref[tid*NUM_FEATURES + k] = 1; } else { user_pref[tid*NUM_FEATURES + k] = user_row[tid*NUM_FEATURES + k]; } user_confidence_I[tid*NUM_FEATURES * k * NUM_FEATURES + k] = user_row[tid * NUM_FEATURES + k]; user_confidence[tid*NUM_FEATURES * k * NUM_FEATURES + k] = user_row[tid * NUM_FEATURES + k] + 1; } tid+= blockDim.x * gridDim.x; } tid = threadIdx.x; for(int i = 0; i < iterations; i++) { for(int j = 0; j < endOfUserIndex; j++) { int res; int bid = blockIdx.x; int tid = threadIdx.x; while(bid * SPLIT < endOfArtistIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = Y_T[row * features + tid%SPLIT] * user_confidence_I[col + features * tid%SPLIT]; __syncthreads(); atomicAdd(&Y_temp[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfArtistIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = Y_temp[row * features + tid%SPLIT] * Y[col + endOfArtistIndex * tid%SPLIT]; __syncthreads(); atomicAdd(&Y_result_y[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } tid = threadIdx.x; while(tid < endOfArtistIndex*endOfArtistIndex) { int row = tid / endOfArtistIndex; int col = tid % endOfArtistIndex; Y_result_pu [row * endOfArtistIndex + col] += Y_P[row * endOfArtistIndex + col] + I1[row * endOfArtistIndex + col]; tid += blockDim.x * gridDim.x; } bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfArtistIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = Y_T[row * features + tid%SPLIT] * user_confidence[col + features * tid%SPLIT]; __syncthreads(); 
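// Partial products are accumulated into the output tile with atomicAdd. Note that an index
// written as features * tid%SPLIT parses as (features * tid) % SPLIT, because * and % share
// precedence; if tid % SPLIT is intended, parentheses are likely needed.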
atomicAdd(&Y_temp_2[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfArtistIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = Y_temp_2[row * features + tid%SPLIT] * Y[col + endOfArtistIndex * tid%SPLIT]; __syncthreads(); atomicAdd(&Y_result_pu[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } } } tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < endOfArtistIndex) { for(int k = 0; k < NUM_FEATURES; k++) { artist_row[tid * NUM_FEATURES + k] = Y[tid*NUM_FEATURES + k]; if(artist_row[tid * NUM_FEATURES + k] != 0) { artist_pref[tid*NUM_FEATURES + k] = 1; } else { artist_pref[tid*NUM_FEATURES + k] = artist_row[tid*NUM_FEATURES + k]; } artist_confidence_I[tid*NUM_FEATURES * k * NUM_FEATURES + k] = artist_row[tid * NUM_FEATURES + k]; artist_confidence[tid*NUM_FEATURES * k * NUM_FEATURES + k] = artist_row[tid * NUM_FEATURES + k] + 1; } tid+= blockDim.x * gridDim.x; } for(int i = 0; i < iterations; i++) { for(int j = 0; j < endOfUserIndex; j++) { int res; int bid = blockIdx.x; int tid = threadIdx.x; while(bid * SPLIT < endOfUserIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = X_T[row * features + tid%SPLIT] * artist_confidence_I[col + features * tid%SPLIT]; __syncthreads(); atomicAdd(&X_temp[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfUserIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = X_temp[row * features + tid%SPLIT] * X[col + endOfUserIndex * tid%SPLIT]; __syncthreads(); atomicAdd(&X_result_x[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } tid = threadIdx.x; while(tid < endOfUserIndex*endOfUserIndex) { int row = tid / endOfUserIndex; int col = tid % endOfUserIndex; Y_result_y [row * endOfUserIndex + col] += Y_P[row * endOfUserIndex + col] + I1[row * endOfUserIndex + col]; tid += blockDim.x * gridDim.x; } bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfUserIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = X_T[row * features + tid%SPLIT] * artist_confidence[col + features * tid%SPLIT]; __syncthreads(); atomicAdd(&X_temp_2[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } bid = blockIdx.x; tid = threadIdx.x; while(bid * SPLIT < endOfUserIndex*features) { int row = bid / features + tid / SPLIT; int col = bid % features; res = X_temp_2[row * features + tid%SPLIT] * X[col + endOfUserIndex * tid%SPLIT]; __syncthreads(); atomicAdd(&X_result_pi[bid + tid/SPLIT], res); bid += gridDim.x; __syncthreads(); } } tid = threadIdx.x; while(tid < endOfUserIndex*features) { int row = tid / features; int col = tid % features; X [row * features + col] = Y_result_y[row * features + col] / Y_result_pu[row * features + col]; tid += blockDim.x * gridDim.x; } tid = threadIdx.x; while(tid < endOfArtistIndex*features) { int row = tid / features; int col = tid % features; Y [row * features + col] = X_result_x[row * features + col] / X_result_pi[row * features + col]; tid += blockDim.x * gridDim.x; } } } void recommend(int user_id, int num_items, int * answer) { int *user_recs, *rec_vector, *X_rec; user_recs = (int *)malloc(sizeof(int) * endOfArtistIndex); rec_vector = (int *)malloc(sizeof(int) * endOfArtistIndex); X_rec = (int *)malloc(sizeof(int) * NUM_FEATURES); int maxVal = 0, index = 0, no = 0; for(int i = 0; i < endOfArtistIndex; i++) { user_recs[i] = 
dataMatrix[user_id*SPARSE_SIZE + i]; if(user_recs[i] == 0) { user_recs[i] = 1; } else { user_recs[i] = 0; } } for(int i = 0; i < NUM_FEATURES; i++) { X_rec[i] = X[user_id * NUM_FEATURES + i]; } //*********GPU*********// //mat_vec_multiply(Y_T, X_rec, rec_vector, NUM_FEATURES, endOfArtistIndex); /*int *mat_d, *vec_d, *res_vec_d; cudaMalloc((void **)&mat_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES); cudaMalloc((void **)&vec_d, sizeof(int) * endOfArtistIndex); cudaMalloc((void **)&res_vec_d, sizeof(int) * endOfArtistIndex); cudaMemcpy(mat_d, Y_T, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyHostToDevice); cudaMemcpy(vec_d, X_rec, sizeof(int) * endOfArtistIndex, cudaMemcpyHostToDevice); gpu_mat_vec_multiply_shared<<<256, 256>>>(mat_d, vec_d, res_vec_d, NUM_FEATURES, endOfArtistIndex); cudaMemcpy(rec_vector, res_vec_d, sizeof(int) * endOfArtistIndex, cudaMemcpyDeviceToHost);*/ //*********GPU*********// for(int i = 0; i < num_items; i++) { maxVal = INT_MIN, index = 0; for(int j = 0; j < endOfArtistIndex; j++) { no = 0; if(rec_vector[j] > maxVal) { for(int k = 0; k < i; k++) { if(j == answer[k]) { no = 1; } } if(no == 0) { maxVal = rec_vector[j]; index = j; } } } answer[i] = index; } } int implicit_als_2(int alpha_val, int iterations, double lambda_val, int features) { size_t available, total; X = (int *)malloc(sizeof(int) * endOfUserIndex * NUM_FEATURES); Y = (int *)malloc(sizeof(int) * endOfArtistIndex * NUM_FEATURES); for(int i = 0; i < endOfUserIndex; i++) { for(int j = 0; j < features; j++) { X[i * features + j] = rand() % RAND_RANGE; } } for(int i = 0; i < endOfArtistIndex; i++) { for(int j = 0; j < features; j++) { Y[i * features + j] = rand() % RAND_RANGE; } } int *X_P, *Y_P, *data, *X_d, *Y_d; int *X_temp, *Y_temp, *Y_result_y, *Y_result_pu, *Y_temp_2, *X_result_x, *X_result_pi, *X_temp_2; int *X_I, *Y_I, *I, *I1, *user_row, *artist_row, *user_pref, *artist_pref, *user_confidence, *artist_confidence, *user_confidence_I, *artist_confidence_I; data = (int *)malloc(sizeof(int) * SPARSE_SIZE * SPARSE_SIZE); cudaMemGetInfo(&available, &total); printf("%u %u\n", available, total); H_ERR(cudaMalloc((void **)&data, sizeof(int) * SPARSE_SIZE * SPARSE_SIZE)); H_ERR(cudaMalloc((void **)&X_d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&Y_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&X_T, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&Y_T, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&X_P, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(cudaMalloc((void **)&Y_P, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(cudaMalloc((void **)&X_I, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(cudaMalloc((void **)&Y_I, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(cudaMalloc((void **)&I, sizeof(int) * features * features)); H_ERR(cudaMalloc((void **)&I1, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(cudaMalloc((void **)&user_row, sizeof(int) * endOfArtistIndex * features)); H_ERR(cudaMalloc((void **)&artist_row, sizeof(int) * endOfUserIndex * features)); H_ERR(cudaMalloc((void **)&user_pref, sizeof(int) * endOfArtistIndex * features)); H_ERR(cudaMalloc((void **)&artist_pref, sizeof(int) * endOfUserIndex * features)); H_ERR(cudaMalloc((void **)&user_confidence, sizeof(int) * endOfUserIndex * features)); H_ERR(cudaMalloc((void **)&artist_confidence, sizeof(int) * endOfArtistIndex * features)); H_ERR(cudaMalloc((void 
**)&user_confidence_I, sizeof(int) * endOfUserIndex * features)); H_ERR(cudaMalloc((void **)&artist_confidence_I, sizeof(int) * endOfArtistIndex * features)); H_ERR(cudaMalloc((void **)&X_temp, sizeof(int) * endOfUserIndex * features)); H_ERR(cudaMalloc((void **)&X_temp_2, sizeof(int) * endOfUserIndex * features)); H_ERR(cudaMalloc((void **)&X_result_x, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(cudaMalloc((void **)&X_result_pi, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(cudaMalloc((void **)&Y_temp, sizeof(int) * endOfArtistIndex * features)); H_ERR(cudaMalloc((void **)&Y_temp_2, sizeof(int) * endOfArtistIndex * features)); H_ERR(cudaMalloc((void **)&Y_result_y, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(cudaMalloc((void **)&Y_result_pu, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(cudaMemcpy(data, dataMatrix, sizeof(int) * SPARSE_SIZE * SPARSE_SIZE, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(X_d, X, sizeof(int) * endOfUserIndex * features, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(Y_d, Y, sizeof(int) * endOfArtistIndex * features, cudaMemcpyHostToDevice)); cudaMemGetInfo(&available, &total); printf("%u %u\n", available, total); implicit_als_gpu<<<256, 256>>>(data, X_d, Y_d, X_T, Y_T, X_P, Y_P, X_I, Y_I, I, I1, user_row, artist_row, user_pref, artist_pref, user_confidence, artist_confidence, user_confidence_I, artist_confidence_I, X_temp, Y_temp, Y_result_y, Y_result_pu, Y_temp_2, X_result_x, X_result_pi, X_temp_2, alpha_val, iterations, lambda_val, features, endOfArtistIndex, endOfUserIndex); H_ERR(cudaMemcpy(X, X_d, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyDeviceToHost)); H_ERR(cudaMemcpy(Y, Y_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyDeviceToHost)); return; } int implicit_als(int alpha_val, int iterations, double lambda_val, int features) { size_t available, total; cudaMemGetInfo(&available, &total); //printf("%u %u\n", available, total); double time_beg = wtime(); //GPU alpha mult// for(int i = 0; i < endOfArtistIndex; i++) { for(int j = 0; j < endOfUserIndex; j++) { dataMatrix[i * SPARSE_SIZE + j] = dataMatrix[i * SPARSE_SIZE + j] * alpha_val; } } //NEWGPU// int *X_P, *Y_P; X = (int *)malloc(sizeof(int) * endOfUserIndex * NUM_FEATURES); Y = (int *)malloc(sizeof(int) * endOfArtistIndex * NUM_FEATURES); X_T = (int *)malloc(sizeof(int) * endOfUserIndex * NUM_FEATURES); Y_T = (int *)malloc(sizeof(int) * endOfArtistIndex * NUM_FEATURES); X_P = (int *)malloc(sizeof(int) * endOfUserIndex * endOfUserIndex); Y_P = (int *)malloc(sizeof(int) * endOfArtistIndex * endOfArtistIndex); //GPU random// for(int i = 0; i < endOfUserIndex; i++) { for(int j = 0; j < features; j++) { X[i * features + j] = rand() % RAND_RANGE; } } //NEWGPU// //GPU transpose// /*for(int i = 0; i < endOfUserIndex; i++) { for(int j = 0; j < features; j++) { X_T[j * endOfUserIndex + i] = X[i*features + j]; } }*/ int *m1, *t1; H_ERR(cudaMalloc((void **)&m1, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&t1, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMemcpy(m1, X, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_matrix_transpose<<<256, 256>>>(m1, t1, endOfUserIndex, NUM_FEATURES); H_ERR(cudaMemcpy(X_T, t1, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyDeviceToHost)); H_ERR(cudaFree(m1)); H_ERR(cudaFree(t1)); //NEWGPU// //GPU random// for(int i = 0; i < endOfArtistIndex; i++) { for(int j = 0; j < features; j++) { Y[i * features + j] = rand() % RAND_RANGE; } } //GPU 
transpose /*for(int i = 0; i < endOfArtistIndex; i++) { for(int j = 0; j < features; j++) { Y_T[j * endOfArtistIndex + i] = Y[i * features + j]; } }*/ int *m2, *t2; H_ERR(cudaMalloc((void **)&m2, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&t2, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMemcpy(m2, Y, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_matrix_transpose<<<256, 256>>>(m2, t2, endOfUserIndex, NUM_FEATURES); H_ERR(cudaMemcpy(Y_T, t2, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyDeviceToHost)); H_ERR(cudaFree(m2)); H_ERR(cudaFree(t2)); double elapsed_time = wtime(); elapsed_time -= time_beg; //printf("setup elapsed time is: %f\n", elapsed_time); time_beg = wtime(); int *X_I, *Y_I, *I, *I1, *user_row, *artist_row, *user_pref, *artist_pref, *user_confidence, *artist_confidence, *user_confidence_I, *artist_confidence_I; X_I = (int *)malloc(sizeof(int) * endOfUserIndex * endOfUserIndex); Y_I = (int *)malloc(sizeof(int) * endOfArtistIndex * endOfArtistIndex); I = (int *)malloc(sizeof(int) * features * features); I1 = (int *)malloc(sizeof(int) * endOfArtistIndex * endOfArtistIndex); user_row = (int *)malloc(sizeof(int) * endOfArtistIndex * features); artist_row = (int *)malloc(sizeof(int) * endOfUserIndex * features); user_pref = (int *)malloc(sizeof(int) * endOfArtistIndex * features); artist_pref = (int *)malloc(sizeof(int) * endOfUserIndex * features); user_confidence = (int *)malloc(sizeof(int) * endOfUserIndex * features * features); artist_confidence = (int *)malloc(sizeof(int) * endOfArtistIndex * features * features); user_confidence_I = (int *)malloc(sizeof(int) * endOfUserIndex * features * features); artist_confidence_I = (int *)malloc(sizeof(int) * endOfArtistIndex * features * features); int *X_temp, *Y_temp, *Y_result_y, *Y_result_pu, *Y_temp_2, *X_result_x, *X_result_pi, *X_temp_2; X_temp = (int *)malloc(sizeof(int) * endOfUserIndex * features); X_temp_2 = (int *)malloc(sizeof(int) * endOfUserIndex * features); X_result_x = (int *)malloc(sizeof(int) * endOfUserIndex * endOfUserIndex); X_result_pi = (int *)malloc(sizeof(int) * endOfUserIndex * endOfUserIndex); Y_temp = (int *)malloc(sizeof(int) * endOfArtistIndex * features); Y_temp_2 = (int *)malloc(sizeof(int) * endOfArtistIndex * features); Y_result_y = (int *)malloc(sizeof(int) * endOfArtistIndex * endOfArtistIndex); Y_result_pu = (int *)malloc(sizeof(int) * endOfArtistIndex * endOfArtistIndex); //GPU???/// for(int i = 0; i < endOfUserIndex; i++) { X_I[i * endOfUserIndex + i] = 1; } for(int i = 0; i < endOfArtistIndex; i++) { Y_I[i * endOfArtistIndex + i] = 1; } for(int i = 0; i < features; i++) { I[i * features + i] = 1; } for(int i = 0; i < endOfArtistIndex; i++) { I1[i * endOfArtistIndex + i] = lambda_val; } //*********GPU*********// //mat_mat_multiply(X, X_T, X_P, endOfUserIndex, features, endOfUserIndex); int *mat1_d, *mat2_d, *res_d; H_ERR(cudaMalloc((void **)&mat1_d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&mat2_d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&res_d, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(cudaMemcpy(mat1_d, X, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(mat2_d, X_T, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_mat_mat_multiply_atomic<<<256, 256>>>(mat1_d, mat2_d, res_d, endOfUserIndex, NUM_FEATURES, endOfUserIndex); H_ERR(cudaMemcpy(X_P, res_d, sizeof(int) * 
endOfUserIndex * endOfUserIndex, cudaMemcpyDeviceToHost)); H_ERR(cudaFree(mat1_d)); H_ERR(cudaFree(mat2_d)); H_ERR(cudaFree(res_d)); //mat_mat_multiply(Y, Y_T, Y_P, endOfArtistIndex, features, endOfArtistIndex);// int *mat1_2d, *mat2_2d, *res_2d; H_ERR(cudaMalloc((void **)&mat1_2d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&mat2_2d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&res_2d, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(cudaMemcpy(mat1_2d, Y, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(mat2_2d, Y_T, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_mat_mat_multiply_atomic<<<256, 256>>>(mat1_2d, mat2_2d, res_2d, endOfArtistIndex, NUM_FEATURES, endOfArtistIndex); cudaMemGetInfo(&available, &total); //printf("%u %u\n", available, total); H_ERR(cudaMemcpy(Y_P, res_2d, sizeof(int) * endOfArtistIndex * endOfArtistIndex, cudaMemcpyDeviceToHost)); H_ERR(cudaFree(mat1_2d)); H_ERR(cudaFree(mat2_2d)); H_ERR(cudaFree(res_2d)); //*********GPU*********// elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 1 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); int *x_d, *user_row_d, *user_pref_d, *conf_I_d, *conf_d; H_ERR(cudaMalloc((void **)&x_d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&user_row_d, sizeof(int) * endOfArtistIndex * features)); H_ERR(cudaMalloc((void **)&user_pref_d, sizeof(int) * endOfArtistIndex * features)); H_ERR(cudaMalloc((void **)&conf_I_d, sizeof(int) * endOfUserIndex * NUM_FEATURES * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&conf_d, sizeof(int) * endOfUserIndex * NUM_FEATURES * NUM_FEATURES)); H_ERR(cudaMemcpy(x_d, X, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_als<<<256, 256>>>(x_d, user_row_d, user_pref_d, conf_I_d, conf_d, endOfUserIndex); H_ERR(cudaMemcpy(user_row, user_row_d, sizeof(int) * endOfArtistIndex * features, cudaMemcpyDeviceToHost)); H_ERR(cudaMemcpy(user_pref, user_pref_d, sizeof(int) * endOfArtistIndex * features, cudaMemcpyDeviceToHost)); H_ERR(cudaMemcpy(user_confidence_I, conf_I_d, sizeof(int) * endOfUserIndex * NUM_FEATURES * NUM_FEATURES, cudaMemcpyDeviceToHost)); H_ERR(cudaMemcpy(user_confidence, conf_d, sizeof(int) * endOfUserIndex * NUM_FEATURES * NUM_FEATURES, cudaMemcpyDeviceToHost)); H_ERR(cudaFree(x_d)); H_ERR(cudaFree(user_row_d)); H_ERR(cudaFree(user_pref_d)); H_ERR(cudaFree(conf_I_d)); H_ERR(cudaFree(conf_d)); int *mat1_3d, *mat2_3d, *res_3d; H_ERR(cudaMalloc((void **)&mat1_3d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&mat2_3d, sizeof(int) * NUM_FEATURES * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&res_3d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); int *mat1_4d, *mat2_4d, *res_4d; H_ERR(cudaMalloc((void **)&mat1_4d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&mat2_4d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&res_4d, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); int *mat1_add, *mat2_add, *res_add; H_ERR(cudaMalloc((void **)&mat1_add, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(cudaMalloc((void **)&mat2_add, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(cudaMalloc((void **)&res_add, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); int *mat1_5d, *mat2_5d, *res_5d; H_ERR(cudaMalloc((void **)&mat1_5d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); 
H_ERR(cudaMalloc((void **)&mat2_5d, sizeof(int) * NUM_FEATURES * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&res_5d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); int *mat1_6d, *mat2_6d, *res_6d; H_ERR(cudaMalloc((void **)&mat1_6d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&mat2_6d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&res_6d, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(cudaMemcpy(mat1_3d, Y_T, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(mat2_4d, Y, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(mat1_5d, Y_T, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(mat2_6d, Y, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(mat1_add, Y_P, sizeof(int) * endOfArtistIndex * endOfArtistIndex, cudaMemcpyHostToDevice)); for(int i = 0; i < iterations; i++) { for(int j = 0; j < endOfUserIndex; j++) { /*for(int k = 0; k < features; k++) { user_row[k] = X[j*features + k]; if(user_row[k] != 0) { user_pref[k] = 1; } else { user_pref[k] = user_row[k]; } } for(int k = 0; k < features; k++) { user_confidence_I[k * features + k] = user_row[k]; user_confidence[k * features + k] = user_row[k] + 1; }*/ elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 2 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); //*********GPU*********// //mat_mat_multiply(Y_T, user_confidence_I, Y_temp, endOfArtistIndex, features, features);// H_ERR(cudaMemcpy(mat2_3d, user_confidence_I + j * NUM_FEATURES, sizeof(int) * NUM_FEATURES * NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_mat_mat_multiply_atomic<<<256, 256>>>(mat1_3d, mat2_3d, res_3d, endOfArtistIndex, NUM_FEATURES, NUM_FEATURES); H_ERR(cudaMemcpy(Y_temp, res_3d, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyDeviceToHost)); //mat_mat_multiply(Y_temp, Y, Y_result_y, endOfArtistIndex, features, endOfArtistIndex);// elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 3 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); H_ERR(cudaMemcpy(mat1_4d, Y_temp, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_mat_mat_multiply_atomic<<<256, 256>>>(mat1_4d, mat2_4d, res_4d, endOfArtistIndex, NUM_FEATURES, endOfArtistIndex); H_ERR(cudaMemcpy(Y_result_y, res_4d, sizeof(int) * endOfArtistIndex * endOfArtistIndex, cudaMemcpyDeviceToHost)); //*********GPU*********// /*for(int j = 0; j < endOfArtistIndex; j++) { for(int k = 0; k < endOfArtistIndex; k++) { Y_result_y[j*endOfArtistIndex + k] += Y_P[j*endOfArtistIndex + k] + I1[j*endOfArtistIndex + k]; } }*/ H_ERR(cudaMemcpy(mat2_add, I1, sizeof(int) * endOfArtistIndex * endOfArtistIndex, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(res_add, Y_result_y, sizeof(int) * endOfArtistIndex * endOfArtistIndex, cudaMemcpyHostToDevice)); gpu_matrix_addition<<<256, 256>>>(mat1_add, mat2_add, res_add, endOfArtistIndex, endOfArtistIndex); H_ERR(cudaMemcpy(Y_result_pu, res_add, sizeof(int) * endOfArtistIndex * endOfArtistIndex, cudaMemcpyDeviceToHost)); //*********GPU*********// elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 4 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); //mat_mat_multiply(Y_T, user_confidence, Y_temp_2, endOfArtistIndex, features, features);// H_ERR(cudaMemcpy(mat2_5d, user_confidence + j * NUM_FEATURES, sizeof(int) * NUM_FEATURES * NUM_FEATURES, cudaMemcpyHostToDevice)); 
gpu_mat_mat_multiply_atomic<<<256, 256>>>(mat1_5d, mat2_5d, res_5d, endOfArtistIndex, NUM_FEATURES, NUM_FEATURES); H_ERR(cudaMemcpy(Y_temp_2, res_5d, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyDeviceToHost)); //mat_mat_multiply(Y_temp_2, Y, Y_result_pu, endOfArtistIndex, features, endOfArtistIndex);// elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 6 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); H_ERR(cudaMemcpy(mat1_6d, Y_temp_2, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_mat_mat_multiply_atomic<<<256, 256>>>(mat1_6d, mat2_6d, res_6d, endOfArtistIndex, NUM_FEATURES, endOfArtistIndex); H_ERR(cudaMemcpy(Y_result_pu, res_6d, sizeof(int) * endOfArtistIndex * endOfArtistIndex, cudaMemcpyDeviceToHost)); //*********GPU*********// /*for(int k = 0; k < features; k++) { X[i*features + k] = Y_result_y[i*features + k] / Y_result_pu[i*features + k]; }*/ } H_ERR(cudaFree(mat1_4d)); H_ERR(cudaFree(mat2_4d)); H_ERR(cudaFree(res_4d)); H_ERR(cudaFree(mat1_3d)); H_ERR(cudaFree(mat2_3d)); H_ERR(cudaFree(res_3d)); H_ERR(cudaFree(mat1_add)); H_ERR(cudaFree(mat2_add)); H_ERR(cudaFree(res_add)); H_ERR(cudaFree(mat1_5d)); H_ERR(cudaFree(mat2_5d)); H_ERR(cudaFree(res_5d)); H_ERR(cudaFree(mat1_6d)); H_ERR(cudaFree(mat2_6d)); H_ERR(cudaFree(res_6d)); int *y_d, *artist_row_d, *artist_pref_d, *art_conf_I_d, *art_conf_d; H_ERR(cudaMalloc((void **)&y_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&artist_row_d, sizeof(int) * endOfUserIndex * features)); H_ERR(cudaMalloc((void **)&artist_pref_d, sizeof(int) * endOfUserIndex * features)); H_ERR(cudaMalloc((void **)&art_conf_I_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&art_conf_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES * NUM_FEATURES)); H_ERR(cudaMemcpy(y_d, Y, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_als<<<256, 256>>>(y_d, artist_row_d, artist_pref_d, art_conf_I_d, art_conf_d, endOfArtistIndex); H_ERR(cudaMemcpy(artist_row, artist_row_d, sizeof(int) * endOfUserIndex * features, cudaMemcpyDeviceToHost)); H_ERR(cudaMemcpy(artist_pref, artist_pref_d, sizeof(int) * endOfUserIndex * features, cudaMemcpyDeviceToHost)); H_ERR(cudaMemcpy(artist_confidence_I, art_conf_I_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES * NUM_FEATURES, cudaMemcpyDeviceToHost)); H_ERR(cudaMemcpy(artist_confidence, art_conf_d, sizeof(int) * endOfArtistIndex * NUM_FEATURES * NUM_FEATURES, cudaMemcpyDeviceToHost)); H_ERR(cudaFree(y_d)); H_ERR(cudaFree(artist_row_d)); H_ERR(cudaFree(artist_pref_d)); H_ERR(cudaFree(art_conf_I_d)); H_ERR(cudaFree(art_conf_d)); int *mat1_7d, *mat2_7d, *res_7d; H_ERR(cudaMalloc((void **)&mat1_7d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&mat2_7d, sizeof(int) * NUM_FEATURES * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&res_7d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); int *mat1_8d, *mat2_8d, *res_8d; H_ERR(cudaMalloc((void **)&mat1_8d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&mat2_8d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&res_8d, sizeof(int) * endOfUserIndex * endOfUserIndex)); int *mat1_add2, *mat2_add2, *res_add2; H_ERR(cudaMalloc((void **)&mat1_add2, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(cudaMalloc((void **)&mat2_add2, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(cudaMalloc((void **)&res_add2, sizeof(int) * endOfUserIndex * endOfUserIndex)); 
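// The 9d/10d pairs allocated next mirror the 7d/8d pairs above: X_T times the full
// per-artist confidence slice, then the product with X, producing X_result_pi for the
// element-wise division step at the end of the iteration.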
int *mat1_9d, *mat2_9d, *res_9d; H_ERR(cudaMalloc((void **)&mat1_9d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&mat2_9d, sizeof(int) * NUM_FEATURES * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&res_9d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); int *mat1_10d, *mat2_10d, *res_10d; H_ERR(cudaMalloc((void **)&mat1_10d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&mat2_10d, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&res_10d, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(cudaMemcpy(mat1_7d, X_T, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(mat2_8d, X, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(mat1_9d, X_T, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(mat2_10d, X, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(mat1_add2, Y_P, sizeof(int) * endOfUserIndex * endOfUserIndex, cudaMemcpyHostToDevice)); for(int j = 0; j < endOfArtistIndex; j++) { /*for(int k = 0; k < features; k++) { artist_row[k] = Y[j*features + k]; if(artist_row[k] != 0) { artist_pref[k] = 1; } else { artist_pref[k] = artist_row[k]; } } for(int k = 0; k < features; k++) { artist_confidence_I[k * features + k] = artist_row[k]; artist_confidence[k * features + k] = artist_row[k] + 1; }*/ //*********GPU*********// elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 7 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); //mat_mat_multiply(X_T, artist_confidence_I, X_temp, endOfUserIndex, features, features);// H_ERR(cudaMemcpy(mat2_7d, artist_confidence_I + j * NUM_FEATURES, sizeof(int) * NUM_FEATURES * NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_mat_mat_multiply_atomic<<<256, 256>>>(mat1_7d, mat2_7d, res_7d, endOfUserIndex, NUM_FEATURES, NUM_FEATURES); H_ERR(cudaMemcpy(X_temp, res_7d, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyDeviceToHost)); elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 8 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); //mat_mat_multiply(X_temp, X, X_result_x, endOfUserIndex, features, endOfUserIndex);// H_ERR(cudaMemcpy(mat1_8d, X_temp, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_mat_mat_multiply_atomic<<<256, 256>>>(mat1_8d, mat2_8d, res_8d, endOfUserIndex, NUM_FEATURES, endOfUserIndex); H_ERR(cudaMemcpy(X_result_x, res_8d, sizeof(int) * endOfUserIndex * endOfUserIndex, cudaMemcpyDeviceToHost)); //*********GPU*********// /*for(int j = 0; j < endOfUserIndex; j++) { for(int k = 0; k < endOfUserIndex; k++) { Y_result_y[j*endOfUserIndex + k] += Y_P[j*endOfUserIndex + k] + I1[j*endOfUserIndex + k]; } }*/ H_ERR(cudaMemcpy(mat2_add2, I1, sizeof(int) * endOfUserIndex * endOfUserIndex, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(res_add2, Y_result_y, sizeof(int) * endOfUserIndex * endOfUserIndex, cudaMemcpyHostToDevice)); gpu_matrix_addition<<<256, 256>>>(mat1_add2, mat2_add2, res_add2, endOfUserIndex, endOfUserIndex); H_ERR(cudaMemcpy(Y_result_y, res_add2, sizeof(int) * endOfUserIndex * endOfUserIndex, cudaMemcpyDeviceToHost)); //*********GPU*********// //mat_mat_multiply(X_T, artist_confidence, X_temp_2, endOfUserIndex, features, features);// elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 9 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); H_ERR(cudaMemcpy(mat2_9d, artist_confidence + j * NUM_FEATURES, sizeof(int) * NUM_FEATURES * 
NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_mat_mat_multiply_atomic<<<256, 256>>>(mat1_9d, mat2_9d, res_9d, endOfUserIndex, NUM_FEATURES, NUM_FEATURES); H_ERR(cudaMemcpy(X_temp_2, res_9d, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyDeviceToHost)); elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 10 elapsed time is: %f\n", elapsed_time); time_beg = wtime(); //mat_mat_multiply(X_temp_2, X, X_result_pi, endOfUserIndex, features, endOfUserIndex);// H_ERR(cudaMemcpy(mat1_10d, X_temp_2, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_mat_mat_multiply_atomic<<<256, 256>>>(mat1_10d, mat2_10d, res_10d, endOfUserIndex, NUM_FEATURES, endOfUserIndex); H_ERR(cudaMemcpy(X_result_pi, res_10d, sizeof(int) * endOfUserIndex * endOfUserIndex, cudaMemcpyDeviceToHost)); //*********GPU*********// /*for(int k = 0; k < features; k++) { Y[i*features + k] = X_result_x[i*features + k] / X_result_pi[i*features + k]; }*/ } H_ERR(cudaFree(mat1_7d)); H_ERR(cudaFree(mat2_7d)); H_ERR(cudaFree(res_7d)); H_ERR(cudaFree(mat1_8d)); H_ERR(cudaFree(mat2_8d)); H_ERR(cudaFree(res_8d)); H_ERR(cudaFree(mat1_add2)); H_ERR(cudaFree(mat2_add2)); H_ERR(cudaFree(res_add2)); H_ERR(cudaFree(mat1_9d)); H_ERR(cudaFree(mat2_9d)); H_ERR(cudaFree(res_9d)); H_ERR(cudaFree(mat1_10d)); H_ERR(cudaFree(mat2_10d)); H_ERR(cudaFree(res_10d)); int *mat1_div, *mat2_div, *res_div; H_ERR(cudaMalloc((void **)&mat1_div, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&mat2_div, sizeof(int) * endOfUserIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&res_div, sizeof(int) * endOfUserIndex * endOfUserIndex)); H_ERR(cudaMemcpy(mat1_div, Y_result_y, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(mat2_div, Y_result_pu, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_mat_div<<<256, 256>>>(mat1_div, mat2_div, res_div, endOfUserIndex, NUM_FEATURES); H_ERR(cudaMemcpy(X, res_div, sizeof(int) * endOfUserIndex * NUM_FEATURES, cudaMemcpyDeviceToHost)); H_ERR(cudaFree(mat1_div)); H_ERR(cudaFree(mat2_div)); H_ERR(cudaFree(res_div)); int *mat1_div2, *mat2_div2, *res_div2; H_ERR(cudaMalloc((void **)&mat1_div2, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&mat2_div2, sizeof(int) * endOfArtistIndex * NUM_FEATURES)); H_ERR(cudaMalloc((void **)&res_div2, sizeof(int) * endOfArtistIndex * endOfArtistIndex)); H_ERR(cudaMemcpy(mat1_div2, X_result_x, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); H_ERR(cudaMemcpy(mat2_div2, X_result_pi, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyHostToDevice)); gpu_mat_div<<<256, 256>>>(mat1_div2, mat2_div2, res_div2, endOfArtistIndex, NUM_FEATURES); H_ERR(cudaMemcpy(Y, res_div2, sizeof(int) * endOfArtistIndex * NUM_FEATURES, cudaMemcpyDeviceToHost)); H_ERR(cudaFree(mat1_div2)); H_ERR(cudaFree(mat2_div2)); H_ERR(cudaFree(res_div2)); } elapsed_time = wtime(); elapsed_time -= time_beg; //printf("part 11 elapsed time is: %f\n", elapsed_time); return; } int main (int args, char **argv) { double start_time = wtime(); int newname = 0; dataMatrix = (int *)malloc(sizeof(int) * SPARSE_SIZE * SPARSE_SIZE); users = (char**)malloc(sizeof(char*) * USER_SIZE); for(int i = 0; i < USER_SIZE; i++) { users[i] = (char*)malloc(50 * sizeof(char)); } artists = (char**)malloc(sizeof(char*) * ARTIST_SIZE); for(int i = 0; i < ARTIST_SIZE; i++) { artists[i] = (char*)malloc(50 * sizeof(char)); } artistNames = (char**)malloc(sizeof(char*) * ARTIST_SIZE); for(int i 
= 0; i < ARTIST_SIZE; i++) { artistNames[i] = (char*)malloc(50 * sizeof(char)); } FILE* data = fopen("usersha1-artmbid-artname-plays.tsv", "r"); //our dataset file (tab separated file) if(data == NULL) { //printf("File read error"); return 0; } //j: 0 (user id), 1 (artist id), 2 (artist name), 3(plays) long i = 0; int j = 0; int currentUserIndex = 0, currentArtistIndex = 0, currentPlayCount = 0; while (1) { char dataLine[LINE_SIZE]; if(i < INPUT_SIZE && fgets(dataLine, sizeof(dataLine), data) != NULL)//reading in entire line using fgets and putting it in dataLine { char * token = strtok(dataLine, "\t"); //parsing the data with the tab separater j = 0; while(j < 4) { if(token == NULL) { break; } if(j == 0)//user id, check if its in the user list: if not, add to list, if it is, save the index { currentUserIndex = checkIfUserExistsInData(token); if(currentUserIndex == -1) //must add to users { currentUserIndex = endOfUserIndex; strcpy(users[endOfUserIndex++], token); } } else if (j == 1) //artist id, check if its in the artist list: if not, add to list, if it is, save the index { newname = 0; currentArtistIndex = checkIfArtistExistsInData(token); if(currentArtistIndex == -1) //must add to artists { currentArtistIndex = endOfArtistIndex; strcpy(artists[endOfArtistIndex], token); newname = 1; } } else if(j == 2)//artist name { if(newname == 1) strcpy(artistNames[endOfArtistIndex++], token); } else if(j == 3) //plays, use the indexes to see where they should go in the data (sparse matrix) { currentPlayCount = atoi(token); //convert to integer and place in sparse matrix dataMatrix[currentUserIndex * SPARSE_SIZE + currentArtistIndex] = currentPlayCount; } token = strtok(NULL, "\t"); //reading the next value of the parsed data j++; } i++; } else { break; } } int *ans; ans = (int *)malloc(sizeof(int) * NUM_RECOMMENDATIONS); double time_beg = wtime(); implicit_als_2(40, ITERATIONS, 0.1, 10); double elapsed_time = wtime(); elapsed_time -= time_beg; printf("implicit elapsed time is: %f\n", elapsed_time); time_beg = wtime(); recommend(USER_ID, NUM_RECOMMENDATIONS, ans); elapsed_time = wtime(); elapsed_time -= time_beg; printf("recommend elapsed time is: %f\n", elapsed_time); printf("User %d Recommendations: \n", USER_ID); for(int i = 0; i < NUM_RECOMMENDATIONS; i++) { printf("%s\n", artistNames[ans[i]]); } elapsed_time = wtime(); elapsed_time -= start_time; printf("total elapsed time is: %f\n", elapsed_time); return 0; }
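// Build sketch for the CUDA original (flags are assumptions, not taken from the project;
// any ../helper sources defining wtime()/H_ERR must also be available):
//   nvcc -O2 2108d2ad11009a3bebdc0ddf64761bbe5d6ba528.cu -o cf_cuda
// As with the HIP build, the TSV dataset must be present in the working directory.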
52661b9416486810f42124f5f82f7d4f882cefb4.hip
// !!! This is a file automatically generated by hipify!!!
#ifndef _EXP_KERNEL_
#define _EXP_KERNEL_

#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>

/*
 * The actual kernel
 */
template <class T>
__global__ void expKernel(T * in, T * out, int n)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if(index < n)
        out[index] = exp(in[index]);
    __syncthreads();
}

/*
 Wrapper function for expKernel
 n - array size
 */
template <class T>
void exp(T * in, T * out, int n, int threadsPerBlock){
    dim3 grid(ceil(n/(float)threadsPerBlock), 1, 1);
    dim3 block(threadsPerBlock, 1, 1);
    hipLaunchKernelGGL(( expKernel<T>), dim3(grid), dim3(block), 0, 0, in, out, n);
    hipError_t cudaerr = hipDeviceSynchronize();
    if (cudaerr != hipSuccess)
        printf("exp kernel launch failed with error \"%s\".\n", hipGetErrorString(cudaerr));
}

template void exp<float>(float * in, float * out, int n, int threadsPerBlock);
template void exp<double>(double * in, double * out, int n, int threadsPerBlock);

#endif
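// Usage sketch (buffer names and the element count n are illustrative, not part of this file):
//   float *d_in, *d_out;
//   hipMalloc((void **)&d_in,  n * sizeof(float));
//   hipMalloc((void **)&d_out, n * sizeof(float));
//   exp<float>(d_in, d_out, n, 256);   // one thread per element, 256 threads per block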
52661b9416486810f42124f5f82f7d4f882cefb4.cu
#ifndef _EXP_KERNEL_
#define _EXP_KERNEL_

#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <math.h>

/*
 * The actual kernel
 */
template <class T>
__global__ void expKernel(T * in, T * out, int n)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if(index < n)
        out[index] = exp(in[index]);
    __syncthreads();
}

/*
 Wrapper function for expKernel
 n - array size
 */
template <class T>
void exp(T * in, T * out, int n, int threadsPerBlock){
    dim3 grid(ceil(n/(float)threadsPerBlock), 1, 1);
    dim3 block(threadsPerBlock, 1, 1);
    expKernel<T><<<grid, block>>>(in, out, n);
    cudaError_t cudaerr = cudaDeviceSynchronize();
    if (cudaerr != cudaSuccess)
        printf("exp kernel launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr));
}

template void exp<float>(float * in, float * out, int n, int threadsPerBlock);
template void exp<double>(double * in, double * out, int n, int threadsPerBlock);

#endif
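// Kernel launches are asynchronous, so the wrapper checks errors only after
// cudaDeviceSynchronize(), which reports failures from the preceding launch; the explicit
// instantiations above limit T to float and double.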
6af5fcc91a183a28f8f9b37fd3405eac0bf76673.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

#define T_PER_BLOCK 16
#define MINF __int_as_float(0xff800000)

__global__ void resampleUCHAR4_Kernel(uchar4* d_output, unsigned int outputWidth, unsigned int outputHeight,
                                      const uchar4* d_input, unsigned int inputWidth, unsigned int inputHeight)
{
    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;

    if (x < outputWidth && y < outputHeight)
    {
        const float scaleWidth  = (float)(inputWidth-1) / (float)(outputWidth-1);
        const float scaleHeight = (float)(inputHeight-1) / (float)(outputHeight-1);

        const unsigned int xInput = (unsigned int)(x*scaleWidth + 0.5f);
        const unsigned int yInput = (unsigned int)(y*scaleHeight + 0.5f);

        if (xInput < inputWidth && yInput < inputHeight)
        {
            d_output[y*outputWidth + x] = d_input[yInput*inputWidth + xInput];
        }
    }
}
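// Launch sketch (grid math and buffer names are assumptions based on T_PER_BLOCK; not part of this file):
//   dim3 block(T_PER_BLOCK, T_PER_BLOCK);
//   dim3 grid((outputWidth  + T_PER_BLOCK - 1) / T_PER_BLOCK,
//             (outputHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);
//   hipLaunchKernelGGL(resampleUCHAR4_Kernel, grid, block, 0, 0,
//                      d_output, outputWidth, outputHeight, d_input, inputWidth, inputHeight);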
6af5fcc91a183a28f8f9b37fd3405eac0bf76673.cu
#include "includes.h" #define T_PER_BLOCK 16 #define MINF __int_as_float(0xff800000) __global__ void resampleUCHAR4_Kernel(uchar4* d_output, unsigned int outputWidth, unsigned int outputHeight, const uchar4* d_input, unsigned int inputWidth, unsigned int inputHeight) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < outputWidth && y < outputHeight) { const float scaleWidth = (float)(inputWidth-1) / (float)(outputWidth-1); const float scaleHeight = (float)(inputHeight-1) / (float)(outputHeight-1); const unsigned int xInput = (unsigned int)(x*scaleWidth + 0.5f); const unsigned int yInput = (unsigned int)(y*scaleHeight + 0.5f); if (xInput < inputWidth && yInput < inputHeight) { d_output[y*outputWidth + x] = d_input[yInput*inputWidth + xInput]; } } }
fac8d576f8a86ec4106c84b9e53890c8aad2a95b.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file writer_impl.cu * @brief cuDF-IO parquet writer class implementation */ #include <io/statistics/column_statistics.cuh> #include "writer_impl.hpp" #include <io/utilities/column_utils.cuh> #include "compact_protocol_writer.hpp" #include <cudf/column/column_device_view.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/null_mask.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/table/table_device_view.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_scalar.hpp> #include <rmm/device_uvector.hpp> #include <nvcomp/snappy.h> #include <algorithm> #include <cstring> #include <numeric> #include <utility> namespace cudf { namespace io { namespace detail { namespace parquet { using namespace cudf::io::parquet; using namespace cudf::io; namespace { /** * @brief Helper for pinned host memory */ template <typename T> using pinned_buffer = std::unique_ptr<T, decltype(&hipHostFree)>; /** * @brief Function that translates GDF compression to parquet compression */ parquet::Compression to_parquet_compression(compression_type compression) { switch (compression) { case compression_type::AUTO: case compression_type::SNAPPY: return parquet::Compression::SNAPPY; case compression_type::NONE: return parquet::Compression::UNCOMPRESSED; default: CUDF_EXPECTS(false, "Unsupported compression type"); return parquet::Compression::UNCOMPRESSED; } } } // namespace struct linked_column_view; using LinkedColPtr = std::shared_ptr<linked_column_view>; using LinkedColVector = std::vector<LinkedColPtr>; /** * @brief column_view with the added member pointer to the parent of this column. * */ struct linked_column_view : public column_view { // TODO(cp): we are currently keeping all column_view children info multiple times - once for each // copy of this object. Options: // 1. Inherit from column_view_base. Only lose out on children vector. That is not needed. // 2. Don't inherit at all. make linked_column_view keep a reference wrapper to its column_view linked_column_view(column_view const& col) : column_view(col), parent(nullptr) { for (auto child_it = col.child_begin(); child_it < col.child_end(); ++child_it) { children.push_back(std::make_shared<linked_column_view>(this, *child_it)); } } linked_column_view(linked_column_view* parent, column_view const& col) : column_view(col), parent(parent) { for (auto child_it = col.child_begin(); child_it < col.child_end(); ++child_it) { children.push_back(std::make_shared<linked_column_view>(this, *child_it)); } } linked_column_view* parent; //!< Pointer to parent of this column. 
Nullptr if root LinkedColVector children; }; /** * @brief Converts all column_views of a table into linked_column_views * * @param table table of columns to convert * @return Vector of converted linked_column_views */ LinkedColVector input_table_to_linked_columns(table_view const& table) { LinkedColVector result; for (column_view const& col : table) { result.emplace_back(std::make_shared<linked_column_view>(col)); } return result; } /** * @brief Extends SchemaElement to add members required in constructing parquet_column_view * * Added members are: * 1. leaf_column: Pointer to leaf linked_column_view which points to the corresponding data stream * of a leaf schema node. For non-leaf struct node, this is nullptr. * 2. stats_dtype: datatype for statistics calculation required for the data stream of a leaf node. * 3. ts_scale: scale to multiply or divide timestamp by in order to convert timestamp to parquet * supported types */ struct schema_tree_node : public SchemaElement { LinkedColPtr leaf_column; statistics_dtype stats_dtype; int32_t ts_scale; // TODO(fut): Think about making schema a class that holds a vector of schema_tree_nodes. The // function construct_schema_tree could be its constructor. It can have method to get the per // column nullability given a schema node index corresponding to a leaf schema. Much easier than // that is a method to get path in schema, given a leaf node }; struct leaf_schema_fn { schema_tree_node& col_schema; LinkedColPtr const& col; column_in_metadata const& col_meta; bool timestamp_is_int96; template <typename T> std::enable_if_t<std::is_same_v<T, bool>, void> operator()() { col_schema.type = Type::BOOLEAN; col_schema.stats_dtype = statistics_dtype::dtype_bool; } template <typename T> std::enable_if_t<std::is_same_v<T, int8_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::INT_8; col_schema.stats_dtype = statistics_dtype::dtype_int8; } template <typename T> std::enable_if_t<std::is_same_v<T, int16_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::INT_16; col_schema.stats_dtype = statistics_dtype::dtype_int16; } template <typename T> std::enable_if_t<std::is_same_v<T, int32_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.stats_dtype = statistics_dtype::dtype_int32; } template <typename T> std::enable_if_t<std::is_same_v<T, int64_t>, void> operator()() { col_schema.type = Type::INT64; col_schema.stats_dtype = statistics_dtype::dtype_int64; } template <typename T> std::enable_if_t<std::is_same_v<T, uint8_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::UINT_8; col_schema.stats_dtype = statistics_dtype::dtype_int8; } template <typename T> std::enable_if_t<std::is_same_v<T, uint16_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::UINT_16; col_schema.stats_dtype = statistics_dtype::dtype_int16; } template <typename T> std::enable_if_t<std::is_same_v<T, uint32_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::UINT_32; col_schema.stats_dtype = statistics_dtype::dtype_int32; } template <typename T> std::enable_if_t<std::is_same_v<T, uint64_t>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::UINT_64; col_schema.stats_dtype = statistics_dtype::dtype_int64; } template <typename T> std::enable_if_t<std::is_same_v<T, float>, void> operator()() { col_schema.type = 
Type::FLOAT; col_schema.stats_dtype = statistics_dtype::dtype_float32; } template <typename T> std::enable_if_t<std::is_same_v<T, double>, void> operator()() { col_schema.type = Type::DOUBLE; col_schema.stats_dtype = statistics_dtype::dtype_float64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::string_view>, void> operator()() { col_schema.type = Type::BYTE_ARRAY; col_schema.converted_type = ConvertedType::UTF8; col_schema.stats_dtype = statistics_dtype::dtype_string; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_D>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::DATE; col_schema.stats_dtype = statistics_dtype::dtype_int32; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_s>, void> operator()() { col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64; col_schema.converted_type = (timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_timestamp64; col_schema.ts_scale = 1000; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_ms>, void> operator()() { col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64; col_schema.converted_type = (timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_timestamp64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_us>, void> operator()() { col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64; col_schema.converted_type = (timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MICROS; col_schema.stats_dtype = statistics_dtype::dtype_timestamp64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_ns>, void> operator()() { col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64; col_schema.converted_type = (timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MICROS; col_schema.stats_dtype = statistics_dtype::dtype_timestamp64; col_schema.ts_scale = -1000; // negative value indicates division by absolute value } // unsupported outside cudf for parquet 1.0. template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_D>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::TIME_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_int64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_s>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::TIME_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_int64; col_schema.ts_scale = 1000; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_ms>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::TIME_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_int64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_us>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::TIME_MICROS; col_schema.stats_dtype = statistics_dtype::dtype_int64; } // unsupported outside cudf for parquet 1.0. 
template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_ns>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::TIME_MICROS; col_schema.stats_dtype = statistics_dtype::dtype_int64; col_schema.ts_scale = -1000; // negative value indicates division by absolute value } template <typename T> std::enable_if_t<cudf::is_fixed_point<T>(), void> operator()() { if (std::is_same_v<T, numeric::decimal32>) { col_schema.type = Type::INT32; col_schema.stats_dtype = statistics_dtype::dtype_int32; } else if (std::is_same_v<T, numeric::decimal64>) { col_schema.type = Type::INT64; col_schema.stats_dtype = statistics_dtype::dtype_decimal64; } else { CUDF_FAIL("Unsupported fixed point type for parquet writer"); } col_schema.converted_type = ConvertedType::DECIMAL; col_schema.decimal_scale = -col->type().scale(); // parquet and cudf disagree about scale signs CUDF_EXPECTS(col_meta.is_decimal_precision_set(), "Precision must be specified for decimal columns"); CUDF_EXPECTS(col_meta.get_decimal_precision() >= col_schema.decimal_scale, "Precision must be equal to or greater than scale!"); col_schema.decimal_precision = col_meta.get_decimal_precision(); } template <typename T> std::enable_if_t<cudf::is_nested<T>(), void> operator()() { CUDF_FAIL("This functor is only meant for physical data types"); } template <typename T> std::enable_if_t<cudf::is_dictionary<T>(), void> operator()() { CUDF_FAIL("Dictionary columns are not supported for writing"); } }; inline bool is_col_nullable(LinkedColPtr const& col, column_in_metadata const& col_meta, bool single_write_mode) { if (single_write_mode) { return col->nullable(); } else { if (col_meta.is_nullability_defined()) { CUDF_EXPECTS(col_meta.nullable() || !col->nullable(), "Mismatch in metadata prescribed nullability and input column nullability. " "Metadata for nullable input column cannot prescribe nullability = false"); return col_meta.nullable(); } else { // For chunked write, when not provided nullability, we assume the worst case scenario // that all columns are nullable. return true; } } } /** * @brief Construct schema from input columns and per-column input options * * Recursively traverses through linked_columns and corresponding metadata to construct schema tree. * The resulting schema tree is stored in a vector in pre-order traversal order. */ std::vector<schema_tree_node> construct_schema_tree(LinkedColVector const& linked_columns, table_input_metadata& metadata, bool single_write_mode, bool int96_timestamps) { std::vector<schema_tree_node> schema; schema_tree_node root{}; root.type = UNDEFINED_TYPE; root.repetition_type = NO_REPETITION_TYPE; root.name = "schema"; root.num_children = linked_columns.size(); root.parent_idx = -1; // root schema has no parent schema.push_back(std::move(root)); std::function<void(LinkedColPtr const&, column_in_metadata&, size_t)> add_schema = [&](LinkedColPtr const& col, column_in_metadata& col_meta, size_t parent_idx) { bool col_nullable = is_col_nullable(col, col_meta, single_write_mode); if (col->type().id() == type_id::STRUCT) { // if struct, add current and recursively call for all children schema_tree_node struct_schema{}; struct_schema.repetition_type = col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED; struct_schema.name = (schema[parent_idx].name == "list") ? 
"element" : col_meta.get_name(); struct_schema.num_children = col->num_children(); struct_schema.parent_idx = parent_idx; schema.push_back(std::move(struct_schema)); auto struct_node_index = schema.size() - 1; // for (auto child_it = col->children.begin(); child_it < col->children.end(); child_it++) { // add_schema(*child_it, struct_node_index); // } CUDF_EXPECTS(col->num_children() == static_cast<int>(col_meta.num_children()), "Mismatch in number of child columns between input table and metadata"); for (size_t i = 0; i < col->children.size(); ++i) { add_schema(col->children[i], col_meta.child(i), struct_node_index); } } else if (col->type().id() == type_id::LIST && !col_meta.is_map()) { // List schema is denoted by two levels for each nesting level and one final level for leaf. // The top level is the same name as the column name. // So e.g. List<List<int>> is denoted in the schema by // "col_name" : { "list" : { "element" : { "list" : { "element" } } } } schema_tree_node list_schema_1{}; list_schema_1.converted_type = ConvertedType::LIST; list_schema_1.repetition_type = col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED; list_schema_1.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name(); list_schema_1.num_children = 1; list_schema_1.parent_idx = parent_idx; schema.push_back(std::move(list_schema_1)); schema_tree_node list_schema_2{}; list_schema_2.repetition_type = FieldRepetitionType::REPEATED; list_schema_2.name = "list"; list_schema_2.num_children = 1; list_schema_2.parent_idx = schema.size() - 1; // Parent is list_schema_1, last added. schema.push_back(std::move(list_schema_2)); CUDF_EXPECTS(col_meta.num_children() == 2, "List column's metadata should have exactly two children"); add_schema(col->children[lists_column_view::child_column_index], col_meta.child(lists_column_view::child_column_index), schema.size() - 1); } else if (col->type().id() == type_id::LIST && col_meta.is_map()) { // Map schema is denoted by a list of struct // e.g. List<Struct<String,String>> will be // "col_name" : { "key_value" : { "key", "value" } } // verify the List child structure is a struct<left_child, right_child> auto const& struct_col = col->child(lists_column_view::child_column_index); CUDF_EXPECTS(struct_col.type().id() == type_id::STRUCT, "Map should be a List of struct"); CUDF_EXPECTS(struct_col.num_children() == 2, "Map should be a List of struct with two children only but found " + std::to_string(struct_col.num_children())); schema_tree_node map_schema{}; map_schema.converted_type = ConvertedType::MAP; map_schema.repetition_type = col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED; map_schema.name = col_meta.get_name(); map_schema.num_children = 1; map_schema.parent_idx = parent_idx; schema.push_back(std::move(map_schema)); schema_tree_node repeat_group{}; repeat_group.repetition_type = FieldRepetitionType::REPEATED; repeat_group.name = "key_value"; repeat_group.num_children = 2; repeat_group.parent_idx = schema.size() - 1; // Parent is map_schema, last added. 
schema.push_back(std::move(repeat_group)); CUDF_EXPECTS(col_meta.num_children() == 2, "List column's metadata should have exactly two children"); CUDF_EXPECTS(col_meta.child(lists_column_view::child_column_index).num_children() == 2, "Map struct column should have exactly two children"); // verify the col meta of children of the struct have name key and value auto& left_child_meta = col_meta.child(lists_column_view::child_column_index).child(0); left_child_meta.set_name("key"); left_child_meta.set_nullability(false); auto& right_child_meta = col_meta.child(lists_column_view::child_column_index).child(1); right_child_meta.set_name("value"); // check the repetition type of key is required i.e. the col should be non-nullable auto key_col = col->children[lists_column_view::child_column_index]->children[0]; CUDF_EXPECTS(!is_col_nullable(key_col, left_child_meta, single_write_mode), "key column cannot be nullable. For chunked writing, explicitly set the " "nullability to false in metadata"); // process key size_type struct_col_index = schema.size() - 1; add_schema(key_col, left_child_meta, struct_col_index); // process value add_schema(col->children[lists_column_view::child_column_index]->children[1], right_child_meta, struct_col_index); } else { // if leaf, add current if (col->type().id() == type_id::STRING) { CUDF_EXPECTS(col_meta.num_children() == 2 or col_meta.num_children() == 0, "String column's corresponding metadata should have zero or two children"); } else { CUDF_EXPECTS(col_meta.num_children() == 0, "Leaf column's corresponding metadata cannot have children"); } schema_tree_node col_schema{}; bool timestamp_is_int96 = int96_timestamps or col_meta.is_enabled_int96_timestamps(); cudf::type_dispatcher(col->type(), leaf_schema_fn{col_schema, col, col_meta, timestamp_is_int96}); col_schema.repetition_type = col_nullable ? OPTIONAL : REQUIRED; col_schema.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name(); col_schema.parent_idx = parent_idx; col_schema.leaf_column = col; schema.push_back(col_schema); } }; CUDF_EXPECTS(metadata.column_metadata.size() == linked_columns.size(), "Mismatch in the number of columns and the corresponding metadata elements"); // Add all linked_columns to schema using parent_idx = 0 (root) for (size_t i = 0; i < linked_columns.size(); ++i) { add_schema(linked_columns[i], metadata.column_metadata[i], 0); } return schema; } /** * @brief Class to store parquet specific information for one data stream. * * Contains information about a single data stream. In case of struct columns, a data stream is one * of the child leaf columns that contains data. * e.g. 
A column Struct<int, List<float>> contains 2 data streams: * - Struct<int> * - Struct<List<float>> * */ struct parquet_column_view { parquet_column_view(schema_tree_node const& schema_node, std::vector<schema_tree_node> const& schema_tree, rmm::cuda_stream_view stream); column_view leaf_column_view() const; gpu::parquet_column_device_view get_device_view(rmm::cuda_stream_view stream) const; column_view cudf_column_view() const { return cudf_col; } parquet::Type physical_type() const { return schema_node.type; } std::vector<std::string> const& get_path_in_schema() { return path_in_schema; } // LIST related member functions uint8_t max_def_level() const noexcept { return _max_def_level; } uint8_t max_rep_level() const noexcept { return _max_rep_level; } bool is_list() const noexcept { return _is_list; } private: // Schema related members schema_tree_node schema_node; std::vector<std::string> path_in_schema; uint8_t _max_def_level = 0; uint8_t _max_rep_level = 0; rmm::device_uvector<uint8_t> _d_nullability; column_view cudf_col; // List-related members bool _is_list; rmm::device_uvector<size_type> _dremel_offsets; ///< For each row, the absolute offset into the repetition and definition ///< level vectors. O(num rows) rmm::device_uvector<uint8_t> _rep_level; rmm::device_uvector<uint8_t> _def_level; std::vector<uint8_t> _nullability; size_type _data_count = 0; }; parquet_column_view::parquet_column_view(schema_tree_node const& schema_node, std::vector<schema_tree_node> const& schema_tree, rmm::cuda_stream_view stream) : schema_node(schema_node), _d_nullability(0, stream), _dremel_offsets(0, stream), _rep_level(0, stream), _def_level(0, stream) { // Construct single inheritance column_view from linked_column_view auto curr_col = schema_node.leaf_column.get(); column_view single_inheritance_cudf_col = *curr_col; while (curr_col->parent) { auto const& parent = *curr_col->parent; // For list columns, we still need to retain the offset child column. auto children = (parent.type().id() == type_id::LIST) ? 
std::vector<column_view>{parent.child(lists_column_view::offsets_column_index), single_inheritance_cudf_col} : std::vector<column_view>{single_inheritance_cudf_col}; single_inheritance_cudf_col = column_view(parent.type(), parent.size(), parent.head(), parent.null_mask(), UNKNOWN_NULL_COUNT, parent.offset(), children); curr_col = curr_col->parent; } cudf_col = single_inheritance_cudf_col; // Construct path_in_schema by travelling up in the schema_tree std::vector<std::string> path; auto curr_schema_node = schema_node; do { path.push_back(curr_schema_node.name); if (curr_schema_node.parent_idx != -1) { curr_schema_node = schema_tree[curr_schema_node.parent_idx]; } } while (curr_schema_node.parent_idx != -1); path_in_schema = std::vector<std::string>(path.crbegin(), path.crend()); // Calculate max definition level by counting the number of levels that are optional (nullable) // and max repetition level by counting the number of REPEATED levels in this column's hierarchy uint16_t max_def_level = 0; uint16_t max_rep_level = 0; curr_schema_node = schema_node; while (curr_schema_node.parent_idx != -1) { if (curr_schema_node.repetition_type == parquet::REPEATED or curr_schema_node.repetition_type == parquet::OPTIONAL) { ++max_def_level; } if (curr_schema_node.repetition_type == parquet::REPEATED) { ++max_rep_level; } curr_schema_node = schema_tree[curr_schema_node.parent_idx]; } CUDF_EXPECTS(max_def_level < 256, "Definition levels above 255 are not supported"); CUDF_EXPECTS(max_rep_level < 256, "Definition levels above 255 are not supported"); _max_def_level = max_def_level; _max_rep_level = max_rep_level; // Construct nullability vector using repetition_type from schema. std::vector<uint8_t> r_nullability; curr_schema_node = schema_node; while (curr_schema_node.parent_idx != -1) { if (not curr_schema_node.is_stub()) { r_nullability.push_back(curr_schema_node.repetition_type == FieldRepetitionType::OPTIONAL); } curr_schema_node = schema_tree[curr_schema_node.parent_idx]; } _nullability = std::vector<uint8_t>(r_nullability.crbegin(), r_nullability.crend()); // TODO(cp): Explore doing this for all columns in a single go outside this ctor. Maybe using // hostdevice_vector. Currently this involves a hipMemcpyAsync for each column. _d_nullability = rmm::device_uvector<uint8_t>(_nullability.size(), stream); CUDA_TRY(hipMemcpyAsync(_d_nullability.data(), _nullability.data(), _nullability.size() * sizeof(uint8_t), hipMemcpyHostToDevice, stream.value())); _is_list = (_max_rep_level > 0); if (cudf_col.size() == 0) { return; } if (_is_list) { // Top level column's offsets are not applied to all children. 
Get the effective offset and // size of the leaf column // Calculate row offset into dremel data (repetition/definition values) and the respective // definition and repetition levels gpu::dremel_data dremel = gpu::get_dremel_data(cudf_col, _d_nullability, _nullability, stream); _dremel_offsets = std::move(dremel.dremel_offsets); _rep_level = std::move(dremel.rep_level); _def_level = std::move(dremel.def_level); _data_count = dremel.leaf_data_size; // Needed for knowing what size dictionary to allocate stream.synchronize(); } else { // For non-list struct, the size of the root column is the same as the size of the leaf column _data_count = cudf_col.size(); } } column_view parquet_column_view::leaf_column_view() const { auto col = cudf_col; while (cudf::is_nested(col.type())) { if (col.type().id() == type_id::LIST) { col = col.child(lists_column_view::child_column_index); } else if (col.type().id() == type_id::STRUCT) { col = col.child(0); // Stored cudf_col has only one child if struct } } return col; } gpu::parquet_column_device_view parquet_column_view::get_device_view( rmm::cuda_stream_view stream) const { column_view col = leaf_column_view(); auto desc = gpu::parquet_column_device_view{}; // Zero out all fields desc.stats_dtype = schema_node.stats_dtype; desc.ts_scale = schema_node.ts_scale; if (is_list()) { desc.level_offsets = _dremel_offsets.data(); desc.rep_values = _rep_level.data(); desc.def_values = _def_level.data(); } desc.num_rows = cudf_col.size(); desc.physical_type = static_cast<uint8_t>(physical_type()); desc.level_bits = CompactProtocolReader::NumRequiredBits(max_rep_level()) << 4 | CompactProtocolReader::NumRequiredBits(max_def_level()); desc.nullability = _d_nullability.data(); return desc; } void writer::impl::init_page_fragments(cudf::detail::hostdevice_2dvector<gpu::PageFragment>& frag, device_span<gpu::parquet_column_device_view const> col_desc, uint32_t num_rows, uint32_t fragment_size) { gpu::InitPageFragments(frag, col_desc, fragment_size, num_rows, stream); frag.device_to_host(stream, true); } void writer::impl::gather_fragment_statistics( device_2dspan<statistics_chunk> frag_stats_chunk, device_2dspan<gpu::PageFragment const> frag, device_span<gpu::parquet_column_device_view const> col_desc, uint32_t num_fragments) { auto num_columns = col_desc.size(); rmm::device_uvector<statistics_group> frag_stats_group(num_fragments * num_columns, stream); auto frag_stats_group_2dview = device_2dspan<statistics_group>(frag_stats_group.data(), num_columns, num_fragments); gpu::InitFragmentStatistics(frag_stats_group_2dview, frag, col_desc, stream); detail::calculate_group_statistics<detail::io_file_format::PARQUET>( frag_stats_chunk.data(), frag_stats_group.data(), num_fragments * num_columns, stream); stream.synchronize(); } void writer::impl::init_page_sizes(hostdevice_2dvector<gpu::EncColumnChunk>& chunks, device_span<gpu::parquet_column_device_view const> col_desc, uint32_t num_columns) { chunks.host_to_device(stream); gpu::InitEncoderPages(chunks, {}, col_desc, num_columns, nullptr, nullptr, 0, stream); chunks.device_to_host(stream, true); } auto build_chunk_dictionaries(hostdevice_2dvector<gpu::EncColumnChunk>& chunks, host_span<gpu::parquet_column_device_view const> col_desc, uint32_t num_rows, rmm::cuda_stream_view stream) { // At this point, we know all chunks and their sizes. 
We want to allocate dictionaries for each // chunk that can have dictionary auto h_chunks = chunks.host_view().flat_view(); std::vector<rmm::device_uvector<size_type>> dict_data; std::vector<rmm::device_uvector<uint16_t>> dict_index; if (h_chunks.size() == 0) { return std::make_pair(std::move(dict_data), std::move(dict_index)); } // Allocate slots for each chunk std::vector<rmm::device_uvector<gpu::slot_type>> hash_maps_storage; hash_maps_storage.reserve(h_chunks.size()); for (auto& chunk : h_chunks) { if (col_desc[chunk.col_desc_id].physical_type == Type::BOOLEAN) { chunk.use_dictionary = false; } else { chunk.use_dictionary = true; auto& inserted_map = hash_maps_storage.emplace_back(chunk.num_values, stream); chunk.dict_map_slots = inserted_map.data(); chunk.dict_map_size = inserted_map.size(); } } chunks.host_to_device(stream); gpu::initialize_chunk_hash_maps(chunks.device_view().flat_view(), stream); gpu::populate_chunk_hash_maps(chunks, num_rows, stream); chunks.device_to_host(stream, true); // Make decision about which chunks have dictionary for (auto& ck : h_chunks) { if (not ck.use_dictionary) { continue; } std::tie(ck.use_dictionary, ck.dict_rle_bits) = [&]() { // calculate size of chunk if dictionary is used // If we have N unique values then the idx for the last value is N - 1 and nbits is the number // of bits required to encode indices into the dictionary auto max_dict_index = (ck.num_dict_entries > 0) ? ck.num_dict_entries - 1 : 0; auto nbits = CompactProtocolReader::NumRequiredBits(max_dict_index); // We don't use dictionary if the indices are > 16 bits because that's the maximum bitpacking // bitsize we efficiently support if (nbits > 16) { return std::make_pair(false, 0); } // Only these bit sizes are allowed for RLE encoding because it's compute optimized constexpr auto allowed_bitsizes = std::array<size_type, 6>{1, 2, 4, 8, 12, 16}; // ceil to (1/2/4/8/12/16) auto rle_bits = *std::lower_bound(allowed_bitsizes.begin(), allowed_bitsizes.end(), nbits); auto rle_byte_size = util::div_rounding_up_safe(ck.num_values * rle_bits, 8); auto dict_enc_size = ck.uniq_data_size + rle_byte_size; bool use_dict = (ck.plain_data_size > dict_enc_size); if (not use_dict) { rle_bits = 0; } return std::make_pair(use_dict, rle_bits); }(); } // TODO: (enh) Deallocate hash map storage for chunks that don't use dict and clear pointers. 
dict_data.reserve(h_chunks.size()); dict_index.reserve(h_chunks.size()); for (auto& chunk : h_chunks) { if (not chunk.use_dictionary) { continue; } size_t dict_data_size = ::min(MAX_DICT_SIZE, chunk.dict_map_size); auto& inserted_dict_data = dict_data.emplace_back(dict_data_size, stream); auto& inserted_dict_index = dict_index.emplace_back(chunk.num_values, stream); chunk.dict_data = inserted_dict_data.data(); chunk.dict_index = inserted_dict_index.data(); } chunks.host_to_device(stream); gpu::collect_map_entries(chunks.device_view().flat_view(), stream); gpu::get_dictionary_indices(chunks.device_view(), num_rows, stream); return std::make_pair(std::move(dict_data), std::move(dict_index)); } void writer::impl::init_encoder_pages(hostdevice_2dvector<gpu::EncColumnChunk>& chunks, device_span<gpu::parquet_column_device_view const> col_desc, device_span<gpu::EncPage> pages, statistics_chunk* page_stats, statistics_chunk* frag_stats, size_t max_page_comp_data_size, uint32_t num_columns, uint32_t num_pages, uint32_t num_stats_bfr) { rmm::device_uvector<statistics_merge_group> page_stats_mrg(num_stats_bfr, stream); chunks.host_to_device(stream); InitEncoderPages(chunks, pages, col_desc, num_columns, (num_stats_bfr) ? page_stats_mrg.data() : nullptr, (num_stats_bfr > num_pages) ? page_stats_mrg.data() + num_pages : nullptr, max_page_comp_data_size, stream); if (num_stats_bfr > 0) { detail::merge_group_statistics<detail::io_file_format::PARQUET>( page_stats, frag_stats, page_stats_mrg.data(), num_pages, stream); if (num_stats_bfr > num_pages) { detail::merge_group_statistics<detail::io_file_format::PARQUET>( page_stats + num_pages, page_stats, page_stats_mrg.data() + num_pages, num_stats_bfr - num_pages, stream); } } stream.synchronize(); } void snappy_compress(device_span<gpu_inflate_input_s const> comp_in, device_span<gpu_inflate_status_s> comp_stat, size_t max_page_uncomp_data_size, rmm::cuda_stream_view stream) { size_t num_comp_pages = comp_in.size(); try { size_t temp_size; nvcompStatus_t nvcomp_status = nvcompBatchedSnappyCompressGetTempSize( num_comp_pages, max_page_uncomp_data_size, nvcompBatchedSnappyDefaultOpts, &temp_size); CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess, "Error in getting snappy compression scratch size"); // Not needed now but nvcomp API makes no promises about future rmm::device_buffer scratch(temp_size, stream); // Analogous to comp_in.srcDevice rmm::device_uvector<void const*> uncompressed_data_ptrs(num_comp_pages, stream); // Analogous to comp_in.srcSize rmm::device_uvector<size_t> uncompressed_data_sizes(num_comp_pages, stream); // Analogous to comp_in.dstDevice rmm::device_uvector<void*> compressed_data_ptrs(num_comp_pages, stream); // Analogous to comp_stat.bytes_written rmm::device_uvector<size_t> compressed_bytes_written(num_comp_pages, stream); // nvcomp does not currently use comp_in.dstSize. 
Cannot assume that the output will fit in // the space allocated unless one uses the API nvcompBatchedSnappyCompressGetOutputSize() // Prepare the vectors auto comp_it = thrust::make_zip_iterator(uncompressed_data_ptrs.begin(), uncompressed_data_sizes.begin(), compressed_data_ptrs.begin()); thrust::transform(rmm::exec_policy(stream), comp_in.begin(), comp_in.end(), comp_it, [] __device__(gpu_inflate_input_s in) { return thrust::make_tuple(in.srcDevice, in.srcSize, in.dstDevice); }); nvcomp_status = nvcompBatchedSnappyCompressAsync(uncompressed_data_ptrs.data(), uncompressed_data_sizes.data(), max_page_uncomp_data_size, num_comp_pages, scratch.data(), // Not needed rn but future scratch.size(), compressed_data_ptrs.data(), compressed_bytes_written.data(), nvcompBatchedSnappyDefaultOpts, stream.value()); CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess, "Error in snappy compression"); // nvcomp also doesn't use comp_out.status . It guarantees that given enough output space, // compression will succeed. // The other `comp_out` field is `reserved` which is for internal cuIO debugging and can be 0. thrust::transform(rmm::exec_policy(stream), compressed_bytes_written.begin(), compressed_bytes_written.end(), comp_stat.begin(), [] __device__(size_t size) { gpu_inflate_status_s status{}; status.bytes_written = size; return status; }); return; } catch (...) { // If we reach this then there was an error in compressing so set an error status for each page thrust::for_each(rmm::exec_policy(stream), comp_stat.begin(), comp_stat.end(), [] __device__(gpu_inflate_status_s & stat) { stat.status = 1; }); }; } void writer::impl::encode_pages(hostdevice_2dvector<gpu::EncColumnChunk>& chunks, device_span<gpu::EncPage> pages, size_t max_page_uncomp_data_size, uint32_t pages_in_batch, uint32_t first_page_in_batch, uint32_t rowgroups_in_batch, uint32_t first_rowgroup, const statistics_chunk* page_stats, const statistics_chunk* chunk_stats) { auto batch_pages = pages.subspan(first_page_in_batch, pages_in_batch); auto batch_pages_stats = (page_stats != nullptr) ? device_span<statistics_chunk const>(page_stats + first_page_in_batch, pages_in_batch) : device_span<statistics_chunk const>(); uint32_t max_comp_pages = (compression_ != parquet::Compression::UNCOMPRESSED) ? pages_in_batch : 0; rmm::device_uvector<gpu_inflate_input_s> compression_input(max_comp_pages, stream); rmm::device_uvector<gpu_inflate_status_s> compression_status(max_comp_pages, stream); device_span<gpu_inflate_input_s> comp_in{compression_input.data(), compression_input.size()}; device_span<gpu_inflate_status_s> comp_stat{compression_status.data(), compression_status.size()}; gpu::EncodePages(batch_pages, comp_in, comp_stat, stream); auto env_use_nvcomp = std::getenv("LIBCUDF_USE_NVCOMP"); bool use_nvcomp = env_use_nvcomp != nullptr ? 
std::atoi(env_use_nvcomp) : 0; switch (compression_) { case parquet::Compression::SNAPPY: if (use_nvcomp) { snappy_compress(comp_in, comp_stat, max_page_uncomp_data_size, stream); } else { CUDA_TRY(gpu_snap(comp_in.data(), comp_stat.data(), pages_in_batch, stream)); } break; default: break; } // TBD: Not clear if the official spec actually allows dynamically turning off compression at the // chunk-level auto d_chunks_in_batch = chunks.device_view().subspan(first_rowgroup, rowgroups_in_batch); DecideCompression(d_chunks_in_batch.flat_view(), stream); EncodePageHeaders(batch_pages, comp_stat, batch_pages_stats, chunk_stats, stream); GatherPages(d_chunks_in_batch.flat_view(), pages, stream); auto h_chunks_in_batch = chunks.host_view().subspan(first_rowgroup, rowgroups_in_batch); CUDA_TRY(hipMemcpyAsync(h_chunks_in_batch.data(), d_chunks_in_batch.data(), d_chunks_in_batch.flat_view().size_bytes(), hipMemcpyDeviceToHost, stream.value())); stream.synchronize(); } writer::impl::impl(std::unique_ptr<data_sink> sink, parquet_writer_options const& options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : _mr(mr), stream(stream), compression_(to_parquet_compression(options.get_compression())), stats_granularity_(options.get_stats_level()), int96_timestamps(options.is_enabled_int96_timestamps()), out_sink_(std::move(sink)), single_write_mode(mode == SingleWriteMode::YES) { if (options.get_metadata()) { table_meta = std::make_unique<table_input_metadata>(*options.get_metadata()); } init_state(); } writer::impl::impl(std::unique_ptr<data_sink> sink, chunked_parquet_writer_options const& options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : _mr(mr), stream(stream), compression_(to_parquet_compression(options.get_compression())), stats_granularity_(options.get_stats_level()), int96_timestamps(options.is_enabled_int96_timestamps()), single_write_mode(mode == SingleWriteMode::YES), out_sink_(std::move(sink)) { if (options.get_metadata()) { table_meta = std::make_unique<table_input_metadata>(*options.get_metadata()); } init_state(); } writer::impl::~impl() { close(); } void writer::impl::init_state() { // Write file header file_header_s fhdr; fhdr.magic = parquet_magic; out_sink_->host_write(&fhdr, sizeof(fhdr)); current_chunk_offset = sizeof(file_header_s); } void writer::impl::write(table_view const& table) { CUDF_EXPECTS(not closed, "Data has already been flushed to out and closed"); size_type num_rows = table.num_rows(); if (not table_meta) { table_meta = std::make_unique<table_input_metadata>(table); } // Fill unnamed columns' names in table_meta std::function<void(column_in_metadata&, std::string)> add_default_name = [&](column_in_metadata& col_meta, std::string default_name) { if (col_meta.get_name().empty()) col_meta.set_name(default_name); for (size_type i = 0; i < col_meta.num_children(); ++i) { add_default_name(col_meta.child(i), col_meta.get_name() + "_" + std::to_string(i)); } }; for (size_t i = 0; i < table_meta->column_metadata.size(); ++i) { add_default_name(table_meta->column_metadata[i], "_col" + std::to_string(i)); } auto vec = input_table_to_linked_columns(table); auto schema_tree = construct_schema_tree(vec, *table_meta, single_write_mode, int96_timestamps); // Construct parquet_column_views from the schema tree leaf nodes. 
std::vector<parquet_column_view> parquet_columns; for (schema_tree_node const& schema_node : schema_tree) { if (schema_node.leaf_column) { parquet_columns.emplace_back(schema_node, schema_tree, stream); } } // Mass allocation of column_device_views for each parquet_column_view std::vector<column_view> cudf_cols; cudf_cols.reserve(parquet_columns.size()); for (auto const& parq_col : parquet_columns) { cudf_cols.push_back(parq_col.cudf_column_view()); } table_view single_streams_table(cudf_cols); size_type num_columns = single_streams_table.num_columns(); std::vector<SchemaElement> this_table_schema(schema_tree.begin(), schema_tree.end()); if (md.version == 0) { md.version = 1; md.num_rows = num_rows; md.column_order_listsize = (stats_granularity_ != statistics_freq::STATISTICS_NONE) ? num_columns : 0; std::transform(table_meta->user_data.begin(), table_meta->user_data.end(), std::back_inserter(md.key_value_metadata), [](auto const& kv) { return KeyValue{kv.first, kv.second}; }); md.schema = this_table_schema; } else { // verify the user isn't passing mismatched tables CUDF_EXPECTS(md.schema == this_table_schema, "Mismatch in schema between multiple calls to write_chunk"); // increment num rows md.num_rows += num_rows; } // Create table_device_view so that corresponding column_device_view data // can be written into col_desc members auto parent_column_table_device_view = table_device_view::create(single_streams_table, stream); rmm::device_uvector<column_device_view> leaf_column_views(0, stream); // Initialize column description hostdevice_vector<gpu::parquet_column_device_view> col_desc(parquet_columns.size(), stream); std::transform( parquet_columns.begin(), parquet_columns.end(), col_desc.host_ptr(), [&](auto const& pcol) { return pcol.get_device_view(stream); }); // Init page fragments // 5000 is good enough for up to ~200-character strings. Longer strings will start producing // fragments larger than the desired page size -> TODO: keep track of the max fragment size, and // iteratively reduce this value if the largest fragment exceeds the max page size limit (we // ideally want the page size to be below 1MB so as to have enough pages to get good // compression/decompression performance). 
using cudf::io::parquet::gpu::max_page_fragment_size; uint32_t num_fragments = (uint32_t)((num_rows + max_page_fragment_size - 1) / max_page_fragment_size); cudf::detail::hostdevice_2dvector<gpu::PageFragment> fragments( num_columns, num_fragments, stream); if (num_fragments != 0) { // Move column info to device col_desc.host_to_device(stream); leaf_column_views = create_leaf_column_device_views<gpu::parquet_column_device_view>( col_desc, *parent_column_table_device_view, stream); init_page_fragments(fragments, col_desc, num_rows, max_page_fragment_size); } size_t global_rowgroup_base = md.row_groups.size(); // Decide row group boundaries based on uncompressed data size size_t rowgroup_size = 0; uint32_t num_rowgroups = 0; for (uint32_t f = 0, global_r = global_rowgroup_base, rowgroup_start = 0; f < num_fragments; f++) { size_t fragment_data_size = 0; // Replace with STL algorithm to transform and sum for (auto i = 0; i < num_columns; i++) { fragment_data_size += fragments[i][f].fragment_data_size; } if (f > rowgroup_start && (rowgroup_size + fragment_data_size > max_rowgroup_size_ || (f + 1 - rowgroup_start) * max_page_fragment_size > max_rowgroup_rows_)) { // update schema md.row_groups.resize(md.row_groups.size() + 1); md.row_groups[global_r++].num_rows = (f - rowgroup_start) * max_page_fragment_size; num_rowgroups++; rowgroup_start = f; rowgroup_size = 0; } rowgroup_size += fragment_data_size; if (f + 1 == num_fragments) { // update schema md.row_groups.resize(md.row_groups.size() + 1); md.row_groups[global_r++].num_rows = num_rows - rowgroup_start * max_page_fragment_size; num_rowgroups++; } } // Allocate column chunks and gather fragment statistics rmm::device_uvector<statistics_chunk> frag_stats(0, stream); if (stats_granularity_ != statistics_freq::STATISTICS_NONE) { frag_stats.resize(num_fragments * num_columns, stream); if (frag_stats.size() != 0) { auto frag_stats_2dview = device_2dspan<statistics_chunk>(frag_stats.data(), num_columns, num_fragments); gather_fragment_statistics(frag_stats_2dview, fragments, col_desc, num_fragments); } } // Initialize row groups and column chunks uint32_t num_chunks = num_rowgroups * num_columns; hostdevice_2dvector<gpu::EncColumnChunk> chunks(num_rowgroups, num_columns, stream); for (uint32_t r = 0, global_r = global_rowgroup_base, f = 0, start_row = 0; r < num_rowgroups; r++, global_r++) { uint32_t fragments_in_chunk = (uint32_t)( (md.row_groups[global_r].num_rows + max_page_fragment_size - 1) / max_page_fragment_size); md.row_groups[global_r].total_byte_size = 0; md.row_groups[global_r].columns.resize(num_columns); for (int i = 0; i < num_columns; i++) { gpu::EncColumnChunk* ck = &chunks[r][i]; *ck = {}; ck->col_desc = col_desc.device_ptr() + i; ck->col_desc_id = i; ck->fragments = &fragments.device_view()[i][f]; ck->stats = (frag_stats.size() != 0) ? 
frag_stats.data() + i * num_fragments + f : nullptr; ck->start_row = start_row; ck->num_rows = (uint32_t)md.row_groups[global_r].num_rows; ck->first_fragment = i * num_fragments + f; auto chunk_fragments = fragments[i].subspan(f, fragments_in_chunk); ck->num_values = std::accumulate(chunk_fragments.begin(), chunk_fragments.end(), 0, [](uint32_t l, auto r) { return l + r.num_values; }); ck->plain_data_size = std::accumulate( chunk_fragments.begin(), chunk_fragments.end(), 0, [](int sum, gpu::PageFragment frag) { return sum + frag.fragment_data_size; }); md.row_groups[global_r].columns[i].meta_data.type = parquet_columns[i].physical_type(); md.row_groups[global_r].columns[i].meta_data.encodings = {Encoding::PLAIN, Encoding::RLE}; md.row_groups[global_r].columns[i].meta_data.path_in_schema = parquet_columns[i].get_path_in_schema(); md.row_groups[global_r].columns[i].meta_data.codec = UNCOMPRESSED; md.row_groups[global_r].columns[i].meta_data.num_values = ck->num_values; } f += fragments_in_chunk; start_row += (uint32_t)md.row_groups[global_r].num_rows; } auto dict_info_owner = build_chunk_dictionaries(chunks, col_desc, num_rows, stream); for (uint32_t rg = 0, global_rg = global_rowgroup_base; rg < num_rowgroups; rg++, global_rg++) { for (int col = 0; col < num_columns; col++) { if (chunks.host_view()[rg][col].use_dictionary) { md.row_groups[global_rg].columns[col].meta_data.encodings.push_back( Encoding::PLAIN_DICTIONARY); } } } // Build chunk dictionaries and count pages if (num_chunks != 0) { init_page_sizes(chunks, col_desc, num_columns); } // Get the maximum page size across all chunks size_type max_page_uncomp_data_size = std::accumulate(chunks.host_view().flat_view().begin(), chunks.host_view().flat_view().end(), 0, [](uint32_t max_page_size, gpu::EncColumnChunk const& chunk) { return ::max(max_page_size, chunk.max_page_data_size); }); size_t max_page_comp_data_size = 0; if (compression_ != parquet::Compression::UNCOMPRESSED) { auto status = nvcompBatchedSnappyCompressGetMaxOutputChunkSize( max_page_uncomp_data_size, nvcompBatchedSnappyDefaultOpts, &max_page_comp_data_size); CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess, "Error in getting compressed size from nvcomp"); } // Initialize batches of rowgroups to encode (mainly to limit peak memory usage) std::vector<uint32_t> batch_list; uint32_t num_pages = 0; size_t max_bytes_in_batch = 1024 * 1024 * 1024; // 1GB - TBD: Tune this size_t max_uncomp_bfr_size = 0; size_t max_comp_bfr_size = 0; size_t max_chunk_bfr_size = 0; uint32_t max_pages_in_batch = 0; size_t bytes_in_batch = 0; size_t comp_bytes_in_batch = 0; for (uint32_t r = 0, groups_in_batch = 0, pages_in_batch = 0; r <= num_rowgroups; r++) { size_t rowgroup_size = 0; size_t comp_rowgroup_size = 0; if (r < num_rowgroups) { for (int i = 0; i < num_columns; i++) { gpu::EncColumnChunk* ck = &chunks[r][i]; ck->first_page = num_pages; num_pages += ck->num_pages; pages_in_batch += ck->num_pages; rowgroup_size += ck->bfr_size; ck->compressed_size = ck->ck_stat_size + ck->page_headers_size + max_page_comp_data_size * ck->num_pages; comp_rowgroup_size += ck->compressed_size; max_chunk_bfr_size = ::max(max_chunk_bfr_size, (size_t)::max(ck->bfr_size, ck->compressed_size)); } } // TBD: We may want to also shorten the batch if we have enough pages (not just based on size) if ((r == num_rowgroups) || (groups_in_batch != 0 && bytes_in_batch + rowgroup_size > max_bytes_in_batch)) { max_uncomp_bfr_size = ::max(max_uncomp_bfr_size, bytes_in_batch); max_comp_bfr_size = ::max(max_comp_bfr_size, 
comp_bytes_in_batch); max_pages_in_batch = ::max(max_pages_in_batch, pages_in_batch); if (groups_in_batch != 0) { batch_list.push_back(groups_in_batch); groups_in_batch = 0; } bytes_in_batch = 0; comp_bytes_in_batch = 0; pages_in_batch = 0; } bytes_in_batch += rowgroup_size; comp_bytes_in_batch += comp_rowgroup_size; groups_in_batch++; } // Clear compressed buffer size if compression has been turned off if (compression_ == parquet::Compression::UNCOMPRESSED) { max_comp_bfr_size = 0; } // Initialize data pointers in batch uint32_t num_stats_bfr = (stats_granularity_ != statistics_freq::STATISTICS_NONE) ? num_pages + num_chunks : 0; rmm::device_buffer uncomp_bfr(max_uncomp_bfr_size, stream); rmm::device_buffer comp_bfr(max_comp_bfr_size, stream); rmm::device_uvector<gpu::EncPage> pages(num_pages, stream); // This contains stats for both the pages and the rowgroups. TODO: make them separate. rmm::device_uvector<statistics_chunk> page_stats(num_stats_bfr, stream); for (uint32_t b = 0, r = 0; b < (uint32_t)batch_list.size(); b++) { uint8_t* bfr = static_cast<uint8_t*>(uncomp_bfr.data()); uint8_t* bfr_c = static_cast<uint8_t*>(comp_bfr.data()); for (uint32_t j = 0; j < batch_list[b]; j++, r++) { for (int i = 0; i < num_columns; i++) { gpu::EncColumnChunk* ck = &chunks[r][i]; ck->uncompressed_bfr = bfr; ck->compressed_bfr = bfr_c; bfr += ck->bfr_size; bfr_c += ck->compressed_size; } } } if (num_pages != 0) { init_encoder_pages(chunks, col_desc, {pages.data(), pages.size()}, (num_stats_bfr) ? page_stats.data() : nullptr, (num_stats_bfr) ? frag_stats.data() : nullptr, max_page_comp_data_size, num_columns, num_pages, num_stats_bfr); } pinned_buffer<uint8_t> host_bfr{nullptr, hipHostFree}; // Encode row groups in batches for (uint32_t b = 0, r = 0, global_r = global_rowgroup_base; b < (uint32_t)batch_list.size(); b++) { // Count pages in this batch uint32_t rnext = r + batch_list[b]; uint32_t first_page_in_batch = chunks[r][0].first_page; uint32_t first_page_in_next_batch = (rnext < num_rowgroups) ? chunks[rnext][0].first_page : num_pages; uint32_t pages_in_batch = first_page_in_next_batch - first_page_in_batch; // device_span<gpu::EncPage> batch_pages{pages.data() + first_page_in_batch, } encode_pages( chunks, {pages.data(), pages.size()}, max_page_uncomp_data_size, pages_in_batch, first_page_in_batch, batch_list[b], r, (stats_granularity_ == statistics_freq::STATISTICS_PAGE) ? page_stats.data() : nullptr, (stats_granularity_ != statistics_freq::STATISTICS_NONE) ? page_stats.data() + num_pages : nullptr); std::vector<std::future<void>> write_tasks; for (; r < rnext; r++, global_r++) { for (auto i = 0; i < num_columns; i++) { gpu::EncColumnChunk* ck = &chunks[r][i]; uint8_t* dev_bfr; if (ck->is_compressed) { md.row_groups[global_r].columns[i].meta_data.codec = compression_; dev_bfr = ck->compressed_bfr; } else { dev_bfr = ck->uncompressed_bfr; } if (out_sink_->is_device_write_preferred(ck->compressed_size)) { // let the writer do what it wants to retrieve the data from the gpu. write_tasks.push_back( out_sink_->device_write_async(dev_bfr + ck->ck_stat_size, ck->compressed_size, stream)); // we still need to do a (much smaller) memcpy for the statistics. 
if (ck->ck_stat_size != 0) { md.row_groups[global_r].columns[i].meta_data.statistics_blob.resize(ck->ck_stat_size); CUDA_TRY( hipMemcpyAsync(md.row_groups[global_r].columns[i].meta_data.statistics_blob.data(), dev_bfr, ck->ck_stat_size, hipMemcpyDeviceToHost, stream.value())); stream.synchronize(); } } else { if (!host_bfr) { host_bfr = pinned_buffer<uint8_t>{[](size_t size) { uint8_t* ptr = nullptr; CUDA_TRY(hipHostMalloc(&ptr, size)); return ptr; }(max_chunk_bfr_size), hipHostFree}; } // copy the full data CUDA_TRY(hipMemcpyAsync(host_bfr.get(), dev_bfr, ck->ck_stat_size + ck->compressed_size, hipMemcpyDeviceToHost, stream.value())); stream.synchronize(); out_sink_->host_write(host_bfr.get() + ck->ck_stat_size, ck->compressed_size); if (ck->ck_stat_size != 0) { md.row_groups[global_r].columns[i].meta_data.statistics_blob.resize(ck->ck_stat_size); memcpy(md.row_groups[global_r].columns[i].meta_data.statistics_blob.data(), host_bfr.get(), ck->ck_stat_size); } } md.row_groups[global_r].total_byte_size += ck->compressed_size; md.row_groups[global_r].columns[i].meta_data.data_page_offset = current_chunk_offset + ((ck->use_dictionary) ? ck->dictionary_size : 0); md.row_groups[global_r].columns[i].meta_data.dictionary_page_offset = (ck->use_dictionary) ? current_chunk_offset : 0; md.row_groups[global_r].columns[i].meta_data.total_uncompressed_size = ck->bfr_size; md.row_groups[global_r].columns[i].meta_data.total_compressed_size = ck->compressed_size; current_chunk_offset += ck->compressed_size; } } for (auto const& task : write_tasks) { task.wait(); } } } std::unique_ptr<std::vector<uint8_t>> writer::impl::close( std::string const& column_chunks_file_path) { if (closed) { return nullptr; } closed = true; CompactProtocolWriter cpw(&buffer_); file_ender_s fendr; buffer_.resize(0); fendr.footer_len = static_cast<uint32_t>(cpw.write(md)); fendr.magic = parquet_magic; out_sink_->host_write(buffer_.data(), buffer_.size()); out_sink_->host_write(&fendr, sizeof(fendr)); out_sink_->flush(); // Optionally output raw file metadata with the specified column chunk file path if (column_chunks_file_path.length() > 0) { file_header_s fhdr = {parquet_magic}; buffer_.resize(0); buffer_.insert(buffer_.end(), reinterpret_cast<const uint8_t*>(&fhdr), reinterpret_cast<const uint8_t*>(&fhdr) + sizeof(fhdr)); for (auto& rowgroup : md.row_groups) { for (auto& col : rowgroup.columns) { col.file_path = column_chunks_file_path; } } fendr.footer_len = static_cast<uint32_t>(cpw.write(md)); buffer_.insert(buffer_.end(), reinterpret_cast<const uint8_t*>(&fendr), reinterpret_cast<const uint8_t*>(&fendr) + sizeof(fendr)); return std::make_unique<std::vector<uint8_t>>(std::move(buffer_)); } else { return {nullptr}; } } // Forward to implementation writer::writer(std::unique_ptr<data_sink> sink, parquet_writer_options const& options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : _impl(std::make_unique<impl>(std::move(sink), options, mode, stream, mr)) { } writer::writer(std::unique_ptr<data_sink> sink, chunked_parquet_writer_options const& options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : _impl(std::make_unique<impl>(std::move(sink), options, mode, stream, mr)) { } // Destructor within this translation unit writer::~writer() = default; // Forward to implementation void writer::write(table_view const& table) { _impl->write(table); } // Forward to implementation std::unique_ptr<std::vector<uint8_t>> writer::close(std::string const& 
column_chunks_file_path) { return _impl->close(column_chunks_file_path); } std::unique_ptr<std::vector<uint8_t>> writer::merge_rowgroup_metadata( const std::vector<std::unique_ptr<std::vector<uint8_t>>>& metadata_list) { std::vector<uint8_t> output; CompactProtocolWriter cpw(&output); FileMetaData md; md.row_groups.reserve(metadata_list.size()); for (const auto& blob : metadata_list) { CompactProtocolReader cpreader( blob.get()->data(), std::max<size_t>(blob.get()->size(), sizeof(file_ender_s)) - sizeof(file_ender_s)); cpreader.skip_bytes(sizeof(file_header_s)); // Skip over file header if (md.num_rows == 0) { cpreader.read(&md); } else { FileMetaData tmp; cpreader.read(&tmp); md.row_groups.insert(md.row_groups.end(), std::make_move_iterator(tmp.row_groups.begin()), std::make_move_iterator(tmp.row_groups.end())); md.num_rows += tmp.num_rows; } } // Reader doesn't currently populate column_order, so infer it here if (md.row_groups.size() != 0) { uint32_t num_columns = static_cast<uint32_t>(md.row_groups[0].columns.size()); md.column_order_listsize = (num_columns > 0 && md.row_groups[0].columns[0].meta_data.statistics_blob.size()) ? num_columns : 0; } // Thrift-encode the resulting output file_header_s fhdr; file_ender_s fendr; fhdr.magic = parquet_magic; output.insert(output.end(), reinterpret_cast<const uint8_t*>(&fhdr), reinterpret_cast<const uint8_t*>(&fhdr) + sizeof(fhdr)); fendr.footer_len = static_cast<uint32_t>(cpw.write(md)); fendr.magic = parquet_magic; output.insert(output.end(), reinterpret_cast<const uint8_t*>(&fendr), reinterpret_cast<const uint8_t*>(&fendr) + sizeof(fendr)); return std::make_unique<std::vector<uint8_t>>(std::move(output)); } } // namespace parquet } // namespace detail } // namespace io } // namespace cudf
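The build_chunk_dictionaries() step in the writer above decides, per column chunk, whether dictionary encoding pays off: it rounds the required index width up to one of the RLE-friendly bit sizes {1, 2, 4, 8, 12, 16} and compares the estimated dictionary-encoded size (unique values plus bit-packed indices) against the plain-encoded size. The standalone sketch below restates that comparison with made-up numbers so the arithmetic is easy to follow; num_required_bits and all sample sizes are assumptions for illustration, not cuDF API.

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Bits needed to encode indices 0..max_index (an empty dictionary needs 0 bits, as in the writer).
static int num_required_bits(uint32_t max_index)
{
  int n = 0;
  while (max_index) { ++n; max_index >>= 1; }
  return n;
}

int main()
{
  uint32_t num_values       = 10000;  // values in the chunk
  uint32_t num_dict_entries = 200;    // unique values found by the chunk's hash map
  size_t plain_data_size    = 40000;  // estimated bytes if written PLAIN
  size_t uniq_data_size     = 800;    // bytes occupied by the unique values themselves

  uint32_t max_dict_index = (num_dict_entries > 0) ? num_dict_entries - 1 : 0;
  int nbits               = num_required_bits(max_dict_index);

  // Indices wider than 16 bits are never dictionary-encoded.
  bool fits = nbits <= 16;

  // Round up to the nearest RLE-supported bit width.
  constexpr std::array<int, 6> allowed_bitsizes{1, 2, 4, 8, 12, 16};
  int rle_bits = fits ? *std::lower_bound(allowed_bitsizes.begin(), allowed_bitsizes.end(), nbits) : 0;

  size_t rle_byte_size = (size_t{num_values} * rle_bits + 7) / 8;  // ceil division to bytes
  size_t dict_enc_size = uniq_data_size + rle_byte_size;
  bool use_dict        = fits && (plain_data_size > dict_enc_size);

  std::printf("nbits=%d rle_bits=%d dict_enc_size=%zu use_dict=%d\n",
              nbits, rle_bits, dict_enc_size, (int)use_dict);
  return 0;
}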
fac8d576f8a86ec4106c84b9e53890c8aad2a95b.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file writer_impl.cu * @brief cuDF-IO parquet writer class implementation */ #include <io/statistics/column_statistics.cuh> #include "writer_impl.hpp" #include <io/utilities/column_utils.cuh> #include "compact_protocol_writer.hpp" #include <cudf/column/column_device_view.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/null_mask.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/table/table_device_view.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_scalar.hpp> #include <rmm/device_uvector.hpp> #include <nvcomp/snappy.h> #include <algorithm> #include <cstring> #include <numeric> #include <utility> namespace cudf { namespace io { namespace detail { namespace parquet { using namespace cudf::io::parquet; using namespace cudf::io; namespace { /** * @brief Helper for pinned host memory */ template <typename T> using pinned_buffer = std::unique_ptr<T, decltype(&cudaFreeHost)>; /** * @brief Function that translates GDF compression to parquet compression */ parquet::Compression to_parquet_compression(compression_type compression) { switch (compression) { case compression_type::AUTO: case compression_type::SNAPPY: return parquet::Compression::SNAPPY; case compression_type::NONE: return parquet::Compression::UNCOMPRESSED; default: CUDF_EXPECTS(false, "Unsupported compression type"); return parquet::Compression::UNCOMPRESSED; } } } // namespace struct linked_column_view; using LinkedColPtr = std::shared_ptr<linked_column_view>; using LinkedColVector = std::vector<LinkedColPtr>; /** * @brief column_view with the added member pointer to the parent of this column. * */ struct linked_column_view : public column_view { // TODO(cp): we are currently keeping all column_view children info multiple times - once for each // copy of this object. Options: // 1. Inherit from column_view_base. Only lose out on children vector. That is not needed. // 2. Don't inherit at all. make linked_column_view keep a reference wrapper to its column_view linked_column_view(column_view const& col) : column_view(col), parent(nullptr) { for (auto child_it = col.child_begin(); child_it < col.child_end(); ++child_it) { children.push_back(std::make_shared<linked_column_view>(this, *child_it)); } } linked_column_view(linked_column_view* parent, column_view const& col) : column_view(col), parent(parent) { for (auto child_it = col.child_begin(); child_it < col.child_end(); ++child_it) { children.push_back(std::make_shared<linked_column_view>(this, *child_it)); } } linked_column_view* parent; //!< Pointer to parent of this column. 
Nullptr if root LinkedColVector children; }; /** * @brief Converts all column_views of a table into linked_column_views * * @param table table of columns to convert * @return Vector of converted linked_column_views */ LinkedColVector input_table_to_linked_columns(table_view const& table) { LinkedColVector result; for (column_view const& col : table) { result.emplace_back(std::make_shared<linked_column_view>(col)); } return result; } /** * @brief Extends SchemaElement to add members required in constructing parquet_column_view * * Added members are: * 1. leaf_column: Pointer to leaf linked_column_view which points to the corresponding data stream * of a leaf schema node. For non-leaf struct node, this is nullptr. * 2. stats_dtype: datatype for statistics calculation required for the data stream of a leaf node. * 3. ts_scale: scale to multiply or divide timestamp by in order to convert timestamp to parquet * supported types */ struct schema_tree_node : public SchemaElement { LinkedColPtr leaf_column; statistics_dtype stats_dtype; int32_t ts_scale; // TODO(fut): Think about making schema a class that holds a vector of schema_tree_nodes. The // function construct_schema_tree could be its constructor. It can have method to get the per // column nullability given a schema node index corresponding to a leaf schema. Much easier than // that is a method to get path in schema, given a leaf node }; struct leaf_schema_fn { schema_tree_node& col_schema; LinkedColPtr const& col; column_in_metadata const& col_meta; bool timestamp_is_int96; template <typename T> std::enable_if_t<std::is_same_v<T, bool>, void> operator()() { col_schema.type = Type::BOOLEAN; col_schema.stats_dtype = statistics_dtype::dtype_bool; } template <typename T> std::enable_if_t<std::is_same_v<T, int8_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::INT_8; col_schema.stats_dtype = statistics_dtype::dtype_int8; } template <typename T> std::enable_if_t<std::is_same_v<T, int16_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::INT_16; col_schema.stats_dtype = statistics_dtype::dtype_int16; } template <typename T> std::enable_if_t<std::is_same_v<T, int32_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.stats_dtype = statistics_dtype::dtype_int32; } template <typename T> std::enable_if_t<std::is_same_v<T, int64_t>, void> operator()() { col_schema.type = Type::INT64; col_schema.stats_dtype = statistics_dtype::dtype_int64; } template <typename T> std::enable_if_t<std::is_same_v<T, uint8_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::UINT_8; col_schema.stats_dtype = statistics_dtype::dtype_int8; } template <typename T> std::enable_if_t<std::is_same_v<T, uint16_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::UINT_16; col_schema.stats_dtype = statistics_dtype::dtype_int16; } template <typename T> std::enable_if_t<std::is_same_v<T, uint32_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::UINT_32; col_schema.stats_dtype = statistics_dtype::dtype_int32; } template <typename T> std::enable_if_t<std::is_same_v<T, uint64_t>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::UINT_64; col_schema.stats_dtype = statistics_dtype::dtype_int64; } template <typename T> std::enable_if_t<std::is_same_v<T, float>, void> operator()() { col_schema.type = 
Type::FLOAT; col_schema.stats_dtype = statistics_dtype::dtype_float32; } template <typename T> std::enable_if_t<std::is_same_v<T, double>, void> operator()() { col_schema.type = Type::DOUBLE; col_schema.stats_dtype = statistics_dtype::dtype_float64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::string_view>, void> operator()() { col_schema.type = Type::BYTE_ARRAY; col_schema.converted_type = ConvertedType::UTF8; col_schema.stats_dtype = statistics_dtype::dtype_string; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_D>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::DATE; col_schema.stats_dtype = statistics_dtype::dtype_int32; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_s>, void> operator()() { col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64; col_schema.converted_type = (timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_timestamp64; col_schema.ts_scale = 1000; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_ms>, void> operator()() { col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64; col_schema.converted_type = (timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_timestamp64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_us>, void> operator()() { col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64; col_schema.converted_type = (timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MICROS; col_schema.stats_dtype = statistics_dtype::dtype_timestamp64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_ns>, void> operator()() { col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64; col_schema.converted_type = (timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MICROS; col_schema.stats_dtype = statistics_dtype::dtype_timestamp64; col_schema.ts_scale = -1000; // negative value indicates division by absolute value } // unsupported outside cudf for parquet 1.0. template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_D>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::TIME_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_int64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_s>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::TIME_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_int64; col_schema.ts_scale = 1000; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_ms>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::TIME_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_int64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_us>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::TIME_MICROS; col_schema.stats_dtype = statistics_dtype::dtype_int64; } // unsupported outside cudf for parquet 1.0. 
template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_ns>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::TIME_MICROS; col_schema.stats_dtype = statistics_dtype::dtype_int64; col_schema.ts_scale = -1000; // negative value indicates division by absolute value } template <typename T> std::enable_if_t<cudf::is_fixed_point<T>(), void> operator()() { if (std::is_same_v<T, numeric::decimal32>) { col_schema.type = Type::INT32; col_schema.stats_dtype = statistics_dtype::dtype_int32; } else if (std::is_same_v<T, numeric::decimal64>) { col_schema.type = Type::INT64; col_schema.stats_dtype = statistics_dtype::dtype_decimal64; } else { CUDF_FAIL("Unsupported fixed point type for parquet writer"); } col_schema.converted_type = ConvertedType::DECIMAL; col_schema.decimal_scale = -col->type().scale(); // parquet and cudf disagree about scale signs CUDF_EXPECTS(col_meta.is_decimal_precision_set(), "Precision must be specified for decimal columns"); CUDF_EXPECTS(col_meta.get_decimal_precision() >= col_schema.decimal_scale, "Precision must be equal to or greater than scale!"); col_schema.decimal_precision = col_meta.get_decimal_precision(); } template <typename T> std::enable_if_t<cudf::is_nested<T>(), void> operator()() { CUDF_FAIL("This functor is only meant for physical data types"); } template <typename T> std::enable_if_t<cudf::is_dictionary<T>(), void> operator()() { CUDF_FAIL("Dictionary columns are not supported for writing"); } }; inline bool is_col_nullable(LinkedColPtr const& col, column_in_metadata const& col_meta, bool single_write_mode) { if (single_write_mode) { return col->nullable(); } else { if (col_meta.is_nullability_defined()) { CUDF_EXPECTS(col_meta.nullable() || !col->nullable(), "Mismatch in metadata prescribed nullability and input column nullability. " "Metadata for nullable input column cannot prescribe nullability = false"); return col_meta.nullable(); } else { // For chunked write, when not provided nullability, we assume the worst case scenario // that all columns are nullable. return true; } } } /** * @brief Construct schema from input columns and per-column input options * * Recursively traverses through linked_columns and corresponding metadata to construct schema tree. * The resulting schema tree is stored in a vector in pre-order traversal order. */ std::vector<schema_tree_node> construct_schema_tree(LinkedColVector const& linked_columns, table_input_metadata& metadata, bool single_write_mode, bool int96_timestamps) { std::vector<schema_tree_node> schema; schema_tree_node root{}; root.type = UNDEFINED_TYPE; root.repetition_type = NO_REPETITION_TYPE; root.name = "schema"; root.num_children = linked_columns.size(); root.parent_idx = -1; // root schema has no parent schema.push_back(std::move(root)); std::function<void(LinkedColPtr const&, column_in_metadata&, size_t)> add_schema = [&](LinkedColPtr const& col, column_in_metadata& col_meta, size_t parent_idx) { bool col_nullable = is_col_nullable(col, col_meta, single_write_mode); if (col->type().id() == type_id::STRUCT) { // if struct, add current and recursively call for all children schema_tree_node struct_schema{}; struct_schema.repetition_type = col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED; struct_schema.name = (schema[parent_idx].name == "list") ? 
"element" : col_meta.get_name(); struct_schema.num_children = col->num_children(); struct_schema.parent_idx = parent_idx; schema.push_back(std::move(struct_schema)); auto struct_node_index = schema.size() - 1; // for (auto child_it = col->children.begin(); child_it < col->children.end(); child_it++) { // add_schema(*child_it, struct_node_index); // } CUDF_EXPECTS(col->num_children() == static_cast<int>(col_meta.num_children()), "Mismatch in number of child columns between input table and metadata"); for (size_t i = 0; i < col->children.size(); ++i) { add_schema(col->children[i], col_meta.child(i), struct_node_index); } } else if (col->type().id() == type_id::LIST && !col_meta.is_map()) { // List schema is denoted by two levels for each nesting level and one final level for leaf. // The top level is the same name as the column name. // So e.g. List<List<int>> is denoted in the schema by // "col_name" : { "list" : { "element" : { "list" : { "element" } } } } schema_tree_node list_schema_1{}; list_schema_1.converted_type = ConvertedType::LIST; list_schema_1.repetition_type = col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED; list_schema_1.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name(); list_schema_1.num_children = 1; list_schema_1.parent_idx = parent_idx; schema.push_back(std::move(list_schema_1)); schema_tree_node list_schema_2{}; list_schema_2.repetition_type = FieldRepetitionType::REPEATED; list_schema_2.name = "list"; list_schema_2.num_children = 1; list_schema_2.parent_idx = schema.size() - 1; // Parent is list_schema_1, last added. schema.push_back(std::move(list_schema_2)); CUDF_EXPECTS(col_meta.num_children() == 2, "List column's metadata should have exactly two children"); add_schema(col->children[lists_column_view::child_column_index], col_meta.child(lists_column_view::child_column_index), schema.size() - 1); } else if (col->type().id() == type_id::LIST && col_meta.is_map()) { // Map schema is denoted by a list of struct // e.g. List<Struct<String,String>> will be // "col_name" : { "key_value" : { "key", "value" } } // verify the List child structure is a struct<left_child, right_child> auto const& struct_col = col->child(lists_column_view::child_column_index); CUDF_EXPECTS(struct_col.type().id() == type_id::STRUCT, "Map should be a List of struct"); CUDF_EXPECTS(struct_col.num_children() == 2, "Map should be a List of struct with two children only but found " + std::to_string(struct_col.num_children())); schema_tree_node map_schema{}; map_schema.converted_type = ConvertedType::MAP; map_schema.repetition_type = col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED; map_schema.name = col_meta.get_name(); map_schema.num_children = 1; map_schema.parent_idx = parent_idx; schema.push_back(std::move(map_schema)); schema_tree_node repeat_group{}; repeat_group.repetition_type = FieldRepetitionType::REPEATED; repeat_group.name = "key_value"; repeat_group.num_children = 2; repeat_group.parent_idx = schema.size() - 1; // Parent is map_schema, last added. 
schema.push_back(std::move(repeat_group)); CUDF_EXPECTS(col_meta.num_children() == 2, "List column's metadata should have exactly two children"); CUDF_EXPECTS(col_meta.child(lists_column_view::child_column_index).num_children() == 2, "Map struct column should have exactly two children"); // verify the col meta of children of the struct have name key and value auto& left_child_meta = col_meta.child(lists_column_view::child_column_index).child(0); left_child_meta.set_name("key"); left_child_meta.set_nullability(false); auto& right_child_meta = col_meta.child(lists_column_view::child_column_index).child(1); right_child_meta.set_name("value"); // check the repetition type of key is required i.e. the col should be non-nullable auto key_col = col->children[lists_column_view::child_column_index]->children[0]; CUDF_EXPECTS(!is_col_nullable(key_col, left_child_meta, single_write_mode), "key column cannot be nullable. For chunked writing, explicitly set the " "nullability to false in metadata"); // process key size_type struct_col_index = schema.size() - 1; add_schema(key_col, left_child_meta, struct_col_index); // process value add_schema(col->children[lists_column_view::child_column_index]->children[1], right_child_meta, struct_col_index); } else { // if leaf, add current if (col->type().id() == type_id::STRING) { CUDF_EXPECTS(col_meta.num_children() == 2 or col_meta.num_children() == 0, "String column's corresponding metadata should have zero or two children"); } else { CUDF_EXPECTS(col_meta.num_children() == 0, "Leaf column's corresponding metadata cannot have children"); } schema_tree_node col_schema{}; bool timestamp_is_int96 = int96_timestamps or col_meta.is_enabled_int96_timestamps(); cudf::type_dispatcher(col->type(), leaf_schema_fn{col_schema, col, col_meta, timestamp_is_int96}); col_schema.repetition_type = col_nullable ? OPTIONAL : REQUIRED; col_schema.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name(); col_schema.parent_idx = parent_idx; col_schema.leaf_column = col; schema.push_back(col_schema); } }; CUDF_EXPECTS(metadata.column_metadata.size() == linked_columns.size(), "Mismatch in the number of columns and the corresponding metadata elements"); // Add all linked_columns to schema using parent_idx = 0 (root) for (size_t i = 0; i < linked_columns.size(); ++i) { add_schema(linked_columns[i], metadata.column_metadata[i], 0); } return schema; } /** * @brief Class to store parquet specific information for one data stream. * * Contains information about a single data stream. In case of struct columns, a data stream is one * of the child leaf columns that contains data. * e.g. 
A column Struct<int, List<float>> contains 2 data streams: * - Struct<int> * - Struct<List<float>> * */ struct parquet_column_view { parquet_column_view(schema_tree_node const& schema_node, std::vector<schema_tree_node> const& schema_tree, rmm::cuda_stream_view stream); column_view leaf_column_view() const; gpu::parquet_column_device_view get_device_view(rmm::cuda_stream_view stream) const; column_view cudf_column_view() const { return cudf_col; } parquet::Type physical_type() const { return schema_node.type; } std::vector<std::string> const& get_path_in_schema() { return path_in_schema; } // LIST related member functions uint8_t max_def_level() const noexcept { return _max_def_level; } uint8_t max_rep_level() const noexcept { return _max_rep_level; } bool is_list() const noexcept { return _is_list; } private: // Schema related members schema_tree_node schema_node; std::vector<std::string> path_in_schema; uint8_t _max_def_level = 0; uint8_t _max_rep_level = 0; rmm::device_uvector<uint8_t> _d_nullability; column_view cudf_col; // List-related members bool _is_list; rmm::device_uvector<size_type> _dremel_offsets; ///< For each row, the absolute offset into the repetition and definition ///< level vectors. O(num rows) rmm::device_uvector<uint8_t> _rep_level; rmm::device_uvector<uint8_t> _def_level; std::vector<uint8_t> _nullability; size_type _data_count = 0; }; parquet_column_view::parquet_column_view(schema_tree_node const& schema_node, std::vector<schema_tree_node> const& schema_tree, rmm::cuda_stream_view stream) : schema_node(schema_node), _d_nullability(0, stream), _dremel_offsets(0, stream), _rep_level(0, stream), _def_level(0, stream) { // Construct single inheritance column_view from linked_column_view auto curr_col = schema_node.leaf_column.get(); column_view single_inheritance_cudf_col = *curr_col; while (curr_col->parent) { auto const& parent = *curr_col->parent; // For list columns, we still need to retain the offset child column. auto children = (parent.type().id() == type_id::LIST) ? 
std::vector<column_view>{parent.child(lists_column_view::offsets_column_index), single_inheritance_cudf_col} : std::vector<column_view>{single_inheritance_cudf_col}; single_inheritance_cudf_col = column_view(parent.type(), parent.size(), parent.head(), parent.null_mask(), UNKNOWN_NULL_COUNT, parent.offset(), children); curr_col = curr_col->parent; } cudf_col = single_inheritance_cudf_col; // Construct path_in_schema by travelling up in the schema_tree std::vector<std::string> path; auto curr_schema_node = schema_node; do { path.push_back(curr_schema_node.name); if (curr_schema_node.parent_idx != -1) { curr_schema_node = schema_tree[curr_schema_node.parent_idx]; } } while (curr_schema_node.parent_idx != -1); path_in_schema = std::vector<std::string>(path.crbegin(), path.crend()); // Calculate max definition level by counting the number of levels that are optional (nullable) // and max repetition level by counting the number of REPEATED levels in this column's hierarchy uint16_t max_def_level = 0; uint16_t max_rep_level = 0; curr_schema_node = schema_node; while (curr_schema_node.parent_idx != -1) { if (curr_schema_node.repetition_type == parquet::REPEATED or curr_schema_node.repetition_type == parquet::OPTIONAL) { ++max_def_level; } if (curr_schema_node.repetition_type == parquet::REPEATED) { ++max_rep_level; } curr_schema_node = schema_tree[curr_schema_node.parent_idx]; } CUDF_EXPECTS(max_def_level < 256, "Definition levels above 255 are not supported"); CUDF_EXPECTS(max_rep_level < 256, "Definition levels above 255 are not supported"); _max_def_level = max_def_level; _max_rep_level = max_rep_level; // Construct nullability vector using repetition_type from schema. std::vector<uint8_t> r_nullability; curr_schema_node = schema_node; while (curr_schema_node.parent_idx != -1) { if (not curr_schema_node.is_stub()) { r_nullability.push_back(curr_schema_node.repetition_type == FieldRepetitionType::OPTIONAL); } curr_schema_node = schema_tree[curr_schema_node.parent_idx]; } _nullability = std::vector<uint8_t>(r_nullability.crbegin(), r_nullability.crend()); // TODO(cp): Explore doing this for all columns in a single go outside this ctor. Maybe using // hostdevice_vector. Currently this involves a cudaMemcpyAsync for each column. _d_nullability = rmm::device_uvector<uint8_t>(_nullability.size(), stream); CUDA_TRY(cudaMemcpyAsync(_d_nullability.data(), _nullability.data(), _nullability.size() * sizeof(uint8_t), cudaMemcpyHostToDevice, stream.value())); _is_list = (_max_rep_level > 0); if (cudf_col.size() == 0) { return; } if (_is_list) { // Top level column's offsets are not applied to all children. 
Get the effective offset and // size of the leaf column // Calculate row offset into dremel data (repetition/definition values) and the respective // definition and repetition levels gpu::dremel_data dremel = gpu::get_dremel_data(cudf_col, _d_nullability, _nullability, stream); _dremel_offsets = std::move(dremel.dremel_offsets); _rep_level = std::move(dremel.rep_level); _def_level = std::move(dremel.def_level); _data_count = dremel.leaf_data_size; // Needed for knowing what size dictionary to allocate stream.synchronize(); } else { // For non-list struct, the size of the root column is the same as the size of the leaf column _data_count = cudf_col.size(); } } column_view parquet_column_view::leaf_column_view() const { auto col = cudf_col; while (cudf::is_nested(col.type())) { if (col.type().id() == type_id::LIST) { col = col.child(lists_column_view::child_column_index); } else if (col.type().id() == type_id::STRUCT) { col = col.child(0); // Stored cudf_col has only one child if struct } } return col; } gpu::parquet_column_device_view parquet_column_view::get_device_view( rmm::cuda_stream_view stream) const { column_view col = leaf_column_view(); auto desc = gpu::parquet_column_device_view{}; // Zero out all fields desc.stats_dtype = schema_node.stats_dtype; desc.ts_scale = schema_node.ts_scale; if (is_list()) { desc.level_offsets = _dremel_offsets.data(); desc.rep_values = _rep_level.data(); desc.def_values = _def_level.data(); } desc.num_rows = cudf_col.size(); desc.physical_type = static_cast<uint8_t>(physical_type()); desc.level_bits = CompactProtocolReader::NumRequiredBits(max_rep_level()) << 4 | CompactProtocolReader::NumRequiredBits(max_def_level()); desc.nullability = _d_nullability.data(); return desc; } void writer::impl::init_page_fragments(cudf::detail::hostdevice_2dvector<gpu::PageFragment>& frag, device_span<gpu::parquet_column_device_view const> col_desc, uint32_t num_rows, uint32_t fragment_size) { gpu::InitPageFragments(frag, col_desc, fragment_size, num_rows, stream); frag.device_to_host(stream, true); } void writer::impl::gather_fragment_statistics( device_2dspan<statistics_chunk> frag_stats_chunk, device_2dspan<gpu::PageFragment const> frag, device_span<gpu::parquet_column_device_view const> col_desc, uint32_t num_fragments) { auto num_columns = col_desc.size(); rmm::device_uvector<statistics_group> frag_stats_group(num_fragments * num_columns, stream); auto frag_stats_group_2dview = device_2dspan<statistics_group>(frag_stats_group.data(), num_columns, num_fragments); gpu::InitFragmentStatistics(frag_stats_group_2dview, frag, col_desc, stream); detail::calculate_group_statistics<detail::io_file_format::PARQUET>( frag_stats_chunk.data(), frag_stats_group.data(), num_fragments * num_columns, stream); stream.synchronize(); } void writer::impl::init_page_sizes(hostdevice_2dvector<gpu::EncColumnChunk>& chunks, device_span<gpu::parquet_column_device_view const> col_desc, uint32_t num_columns) { chunks.host_to_device(stream); gpu::InitEncoderPages(chunks, {}, col_desc, num_columns, nullptr, nullptr, 0, stream); chunks.device_to_host(stream, true); } auto build_chunk_dictionaries(hostdevice_2dvector<gpu::EncColumnChunk>& chunks, host_span<gpu::parquet_column_device_view const> col_desc, uint32_t num_rows, rmm::cuda_stream_view stream) { // At this point, we know all chunks and their sizes. 
We want to allocate dictionaries for each // chunk that can have dictionary auto h_chunks = chunks.host_view().flat_view(); std::vector<rmm::device_uvector<size_type>> dict_data; std::vector<rmm::device_uvector<uint16_t>> dict_index; if (h_chunks.size() == 0) { return std::make_pair(std::move(dict_data), std::move(dict_index)); } // Allocate slots for each chunk std::vector<rmm::device_uvector<gpu::slot_type>> hash_maps_storage; hash_maps_storage.reserve(h_chunks.size()); for (auto& chunk : h_chunks) { if (col_desc[chunk.col_desc_id].physical_type == Type::BOOLEAN) { chunk.use_dictionary = false; } else { chunk.use_dictionary = true; auto& inserted_map = hash_maps_storage.emplace_back(chunk.num_values, stream); chunk.dict_map_slots = inserted_map.data(); chunk.dict_map_size = inserted_map.size(); } } chunks.host_to_device(stream); gpu::initialize_chunk_hash_maps(chunks.device_view().flat_view(), stream); gpu::populate_chunk_hash_maps(chunks, num_rows, stream); chunks.device_to_host(stream, true); // Make decision about which chunks have dictionary for (auto& ck : h_chunks) { if (not ck.use_dictionary) { continue; } std::tie(ck.use_dictionary, ck.dict_rle_bits) = [&]() { // calculate size of chunk if dictionary is used // If we have N unique values then the idx for the last value is N - 1 and nbits is the number // of bits required to encode indices into the dictionary auto max_dict_index = (ck.num_dict_entries > 0) ? ck.num_dict_entries - 1 : 0; auto nbits = CompactProtocolReader::NumRequiredBits(max_dict_index); // We don't use dictionary if the indices are > 16 bits because that's the maximum bitpacking // bitsize we efficiently support if (nbits > 16) { return std::make_pair(false, 0); } // Only these bit sizes are allowed for RLE encoding because it's compute optimized constexpr auto allowed_bitsizes = std::array<size_type, 6>{1, 2, 4, 8, 12, 16}; // ceil to (1/2/4/8/12/16) auto rle_bits = *std::lower_bound(allowed_bitsizes.begin(), allowed_bitsizes.end(), nbits); auto rle_byte_size = util::div_rounding_up_safe(ck.num_values * rle_bits, 8); auto dict_enc_size = ck.uniq_data_size + rle_byte_size; bool use_dict = (ck.plain_data_size > dict_enc_size); if (not use_dict) { rle_bits = 0; } return std::make_pair(use_dict, rle_bits); }(); } // TODO: (enh) Deallocate hash map storage for chunks that don't use dict and clear pointers. 
dict_data.reserve(h_chunks.size()); dict_index.reserve(h_chunks.size()); for (auto& chunk : h_chunks) { if (not chunk.use_dictionary) { continue; } size_t dict_data_size = std::min(MAX_DICT_SIZE, chunk.dict_map_size); auto& inserted_dict_data = dict_data.emplace_back(dict_data_size, stream); auto& inserted_dict_index = dict_index.emplace_back(chunk.num_values, stream); chunk.dict_data = inserted_dict_data.data(); chunk.dict_index = inserted_dict_index.data(); } chunks.host_to_device(stream); gpu::collect_map_entries(chunks.device_view().flat_view(), stream); gpu::get_dictionary_indices(chunks.device_view(), num_rows, stream); return std::make_pair(std::move(dict_data), std::move(dict_index)); } void writer::impl::init_encoder_pages(hostdevice_2dvector<gpu::EncColumnChunk>& chunks, device_span<gpu::parquet_column_device_view const> col_desc, device_span<gpu::EncPage> pages, statistics_chunk* page_stats, statistics_chunk* frag_stats, size_t max_page_comp_data_size, uint32_t num_columns, uint32_t num_pages, uint32_t num_stats_bfr) { rmm::device_uvector<statistics_merge_group> page_stats_mrg(num_stats_bfr, stream); chunks.host_to_device(stream); InitEncoderPages(chunks, pages, col_desc, num_columns, (num_stats_bfr) ? page_stats_mrg.data() : nullptr, (num_stats_bfr > num_pages) ? page_stats_mrg.data() + num_pages : nullptr, max_page_comp_data_size, stream); if (num_stats_bfr > 0) { detail::merge_group_statistics<detail::io_file_format::PARQUET>( page_stats, frag_stats, page_stats_mrg.data(), num_pages, stream); if (num_stats_bfr > num_pages) { detail::merge_group_statistics<detail::io_file_format::PARQUET>( page_stats + num_pages, page_stats, page_stats_mrg.data() + num_pages, num_stats_bfr - num_pages, stream); } } stream.synchronize(); } void snappy_compress(device_span<gpu_inflate_input_s const> comp_in, device_span<gpu_inflate_status_s> comp_stat, size_t max_page_uncomp_data_size, rmm::cuda_stream_view stream) { size_t num_comp_pages = comp_in.size(); try { size_t temp_size; nvcompStatus_t nvcomp_status = nvcompBatchedSnappyCompressGetTempSize( num_comp_pages, max_page_uncomp_data_size, nvcompBatchedSnappyDefaultOpts, &temp_size); CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess, "Error in getting snappy compression scratch size"); // Not needed now but nvcomp API makes no promises about future rmm::device_buffer scratch(temp_size, stream); // Analogous to comp_in.srcDevice rmm::device_uvector<void const*> uncompressed_data_ptrs(num_comp_pages, stream); // Analogous to comp_in.srcSize rmm::device_uvector<size_t> uncompressed_data_sizes(num_comp_pages, stream); // Analogous to comp_in.dstDevice rmm::device_uvector<void*> compressed_data_ptrs(num_comp_pages, stream); // Analogous to comp_stat.bytes_written rmm::device_uvector<size_t> compressed_bytes_written(num_comp_pages, stream); // nvcomp does not currently use comp_in.dstSize. 
Cannot assume that the output will fit in // the space allocated unless one uses the API nvcompBatchedSnappyCompressGetOutputSize() // Prepare the vectors auto comp_it = thrust::make_zip_iterator(uncompressed_data_ptrs.begin(), uncompressed_data_sizes.begin(), compressed_data_ptrs.begin()); thrust::transform(rmm::exec_policy(stream), comp_in.begin(), comp_in.end(), comp_it, [] __device__(gpu_inflate_input_s in) { return thrust::make_tuple(in.srcDevice, in.srcSize, in.dstDevice); }); nvcomp_status = nvcompBatchedSnappyCompressAsync(uncompressed_data_ptrs.data(), uncompressed_data_sizes.data(), max_page_uncomp_data_size, num_comp_pages, scratch.data(), // Not needed rn but future scratch.size(), compressed_data_ptrs.data(), compressed_bytes_written.data(), nvcompBatchedSnappyDefaultOpts, stream.value()); CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess, "Error in snappy compression"); // nvcomp also doesn't use comp_out.status . It guarantees that given enough output space, // compression will succeed. // The other `comp_out` field is `reserved` which is for internal cuIO debugging and can be 0. thrust::transform(rmm::exec_policy(stream), compressed_bytes_written.begin(), compressed_bytes_written.end(), comp_stat.begin(), [] __device__(size_t size) { gpu_inflate_status_s status{}; status.bytes_written = size; return status; }); return; } catch (...) { // If we reach this then there was an error in compressing so set an error status for each page thrust::for_each(rmm::exec_policy(stream), comp_stat.begin(), comp_stat.end(), [] __device__(gpu_inflate_status_s & stat) { stat.status = 1; }); }; } void writer::impl::encode_pages(hostdevice_2dvector<gpu::EncColumnChunk>& chunks, device_span<gpu::EncPage> pages, size_t max_page_uncomp_data_size, uint32_t pages_in_batch, uint32_t first_page_in_batch, uint32_t rowgroups_in_batch, uint32_t first_rowgroup, const statistics_chunk* page_stats, const statistics_chunk* chunk_stats) { auto batch_pages = pages.subspan(first_page_in_batch, pages_in_batch); auto batch_pages_stats = (page_stats != nullptr) ? device_span<statistics_chunk const>(page_stats + first_page_in_batch, pages_in_batch) : device_span<statistics_chunk const>(); uint32_t max_comp_pages = (compression_ != parquet::Compression::UNCOMPRESSED) ? pages_in_batch : 0; rmm::device_uvector<gpu_inflate_input_s> compression_input(max_comp_pages, stream); rmm::device_uvector<gpu_inflate_status_s> compression_status(max_comp_pages, stream); device_span<gpu_inflate_input_s> comp_in{compression_input.data(), compression_input.size()}; device_span<gpu_inflate_status_s> comp_stat{compression_status.data(), compression_status.size()}; gpu::EncodePages(batch_pages, comp_in, comp_stat, stream); auto env_use_nvcomp = std::getenv("LIBCUDF_USE_NVCOMP"); bool use_nvcomp = env_use_nvcomp != nullptr ? 
std::atoi(env_use_nvcomp) : 0; switch (compression_) { case parquet::Compression::SNAPPY: if (use_nvcomp) { snappy_compress(comp_in, comp_stat, max_page_uncomp_data_size, stream); } else { CUDA_TRY(gpu_snap(comp_in.data(), comp_stat.data(), pages_in_batch, stream)); } break; default: break; } // TBD: Not clear if the official spec actually allows dynamically turning off compression at the // chunk-level auto d_chunks_in_batch = chunks.device_view().subspan(first_rowgroup, rowgroups_in_batch); DecideCompression(d_chunks_in_batch.flat_view(), stream); EncodePageHeaders(batch_pages, comp_stat, batch_pages_stats, chunk_stats, stream); GatherPages(d_chunks_in_batch.flat_view(), pages, stream); auto h_chunks_in_batch = chunks.host_view().subspan(first_rowgroup, rowgroups_in_batch); CUDA_TRY(cudaMemcpyAsync(h_chunks_in_batch.data(), d_chunks_in_batch.data(), d_chunks_in_batch.flat_view().size_bytes(), cudaMemcpyDeviceToHost, stream.value())); stream.synchronize(); } writer::impl::impl(std::unique_ptr<data_sink> sink, parquet_writer_options const& options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : _mr(mr), stream(stream), compression_(to_parquet_compression(options.get_compression())), stats_granularity_(options.get_stats_level()), int96_timestamps(options.is_enabled_int96_timestamps()), out_sink_(std::move(sink)), single_write_mode(mode == SingleWriteMode::YES) { if (options.get_metadata()) { table_meta = std::make_unique<table_input_metadata>(*options.get_metadata()); } init_state(); } writer::impl::impl(std::unique_ptr<data_sink> sink, chunked_parquet_writer_options const& options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : _mr(mr), stream(stream), compression_(to_parquet_compression(options.get_compression())), stats_granularity_(options.get_stats_level()), int96_timestamps(options.is_enabled_int96_timestamps()), single_write_mode(mode == SingleWriteMode::YES), out_sink_(std::move(sink)) { if (options.get_metadata()) { table_meta = std::make_unique<table_input_metadata>(*options.get_metadata()); } init_state(); } writer::impl::~impl() { close(); } void writer::impl::init_state() { // Write file header file_header_s fhdr; fhdr.magic = parquet_magic; out_sink_->host_write(&fhdr, sizeof(fhdr)); current_chunk_offset = sizeof(file_header_s); } void writer::impl::write(table_view const& table) { CUDF_EXPECTS(not closed, "Data has already been flushed to out and closed"); size_type num_rows = table.num_rows(); if (not table_meta) { table_meta = std::make_unique<table_input_metadata>(table); } // Fill unnamed columns' names in table_meta std::function<void(column_in_metadata&, std::string)> add_default_name = [&](column_in_metadata& col_meta, std::string default_name) { if (col_meta.get_name().empty()) col_meta.set_name(default_name); for (size_type i = 0; i < col_meta.num_children(); ++i) { add_default_name(col_meta.child(i), col_meta.get_name() + "_" + std::to_string(i)); } }; for (size_t i = 0; i < table_meta->column_metadata.size(); ++i) { add_default_name(table_meta->column_metadata[i], "_col" + std::to_string(i)); } auto vec = input_table_to_linked_columns(table); auto schema_tree = construct_schema_tree(vec, *table_meta, single_write_mode, int96_timestamps); // Construct parquet_column_views from the schema tree leaf nodes. 
std::vector<parquet_column_view> parquet_columns; for (schema_tree_node const& schema_node : schema_tree) { if (schema_node.leaf_column) { parquet_columns.emplace_back(schema_node, schema_tree, stream); } } // Mass allocation of column_device_views for each parquet_column_view std::vector<column_view> cudf_cols; cudf_cols.reserve(parquet_columns.size()); for (auto const& parq_col : parquet_columns) { cudf_cols.push_back(parq_col.cudf_column_view()); } table_view single_streams_table(cudf_cols); size_type num_columns = single_streams_table.num_columns(); std::vector<SchemaElement> this_table_schema(schema_tree.begin(), schema_tree.end()); if (md.version == 0) { md.version = 1; md.num_rows = num_rows; md.column_order_listsize = (stats_granularity_ != statistics_freq::STATISTICS_NONE) ? num_columns : 0; std::transform(table_meta->user_data.begin(), table_meta->user_data.end(), std::back_inserter(md.key_value_metadata), [](auto const& kv) { return KeyValue{kv.first, kv.second}; }); md.schema = this_table_schema; } else { // verify the user isn't passing mismatched tables CUDF_EXPECTS(md.schema == this_table_schema, "Mismatch in schema between multiple calls to write_chunk"); // increment num rows md.num_rows += num_rows; } // Create table_device_view so that corresponding column_device_view data // can be written into col_desc members auto parent_column_table_device_view = table_device_view::create(single_streams_table, stream); rmm::device_uvector<column_device_view> leaf_column_views(0, stream); // Initialize column description hostdevice_vector<gpu::parquet_column_device_view> col_desc(parquet_columns.size(), stream); std::transform( parquet_columns.begin(), parquet_columns.end(), col_desc.host_ptr(), [&](auto const& pcol) { return pcol.get_device_view(stream); }); // Init page fragments // 5000 is good enough for up to ~200-character strings. Longer strings will start producing // fragments larger than the desired page size -> TODO: keep track of the max fragment size, and // iteratively reduce this value if the largest fragment exceeds the max page size limit (we // ideally want the page size to be below 1MB so as to have enough pages to get good // compression/decompression performance). 
using cudf::io::parquet::gpu::max_page_fragment_size; uint32_t num_fragments = (uint32_t)((num_rows + max_page_fragment_size - 1) / max_page_fragment_size); cudf::detail::hostdevice_2dvector<gpu::PageFragment> fragments( num_columns, num_fragments, stream); if (num_fragments != 0) { // Move column info to device col_desc.host_to_device(stream); leaf_column_views = create_leaf_column_device_views<gpu::parquet_column_device_view>( col_desc, *parent_column_table_device_view, stream); init_page_fragments(fragments, col_desc, num_rows, max_page_fragment_size); } size_t global_rowgroup_base = md.row_groups.size(); // Decide row group boundaries based on uncompressed data size size_t rowgroup_size = 0; uint32_t num_rowgroups = 0; for (uint32_t f = 0, global_r = global_rowgroup_base, rowgroup_start = 0; f < num_fragments; f++) { size_t fragment_data_size = 0; // Replace with STL algorithm to transform and sum for (auto i = 0; i < num_columns; i++) { fragment_data_size += fragments[i][f].fragment_data_size; } if (f > rowgroup_start && (rowgroup_size + fragment_data_size > max_rowgroup_size_ || (f + 1 - rowgroup_start) * max_page_fragment_size > max_rowgroup_rows_)) { // update schema md.row_groups.resize(md.row_groups.size() + 1); md.row_groups[global_r++].num_rows = (f - rowgroup_start) * max_page_fragment_size; num_rowgroups++; rowgroup_start = f; rowgroup_size = 0; } rowgroup_size += fragment_data_size; if (f + 1 == num_fragments) { // update schema md.row_groups.resize(md.row_groups.size() + 1); md.row_groups[global_r++].num_rows = num_rows - rowgroup_start * max_page_fragment_size; num_rowgroups++; } } // Allocate column chunks and gather fragment statistics rmm::device_uvector<statistics_chunk> frag_stats(0, stream); if (stats_granularity_ != statistics_freq::STATISTICS_NONE) { frag_stats.resize(num_fragments * num_columns, stream); if (frag_stats.size() != 0) { auto frag_stats_2dview = device_2dspan<statistics_chunk>(frag_stats.data(), num_columns, num_fragments); gather_fragment_statistics(frag_stats_2dview, fragments, col_desc, num_fragments); } } // Initialize row groups and column chunks uint32_t num_chunks = num_rowgroups * num_columns; hostdevice_2dvector<gpu::EncColumnChunk> chunks(num_rowgroups, num_columns, stream); for (uint32_t r = 0, global_r = global_rowgroup_base, f = 0, start_row = 0; r < num_rowgroups; r++, global_r++) { uint32_t fragments_in_chunk = (uint32_t)( (md.row_groups[global_r].num_rows + max_page_fragment_size - 1) / max_page_fragment_size); md.row_groups[global_r].total_byte_size = 0; md.row_groups[global_r].columns.resize(num_columns); for (int i = 0; i < num_columns; i++) { gpu::EncColumnChunk* ck = &chunks[r][i]; *ck = {}; ck->col_desc = col_desc.device_ptr() + i; ck->col_desc_id = i; ck->fragments = &fragments.device_view()[i][f]; ck->stats = (frag_stats.size() != 0) ? 
frag_stats.data() + i * num_fragments + f : nullptr; ck->start_row = start_row; ck->num_rows = (uint32_t)md.row_groups[global_r].num_rows; ck->first_fragment = i * num_fragments + f; auto chunk_fragments = fragments[i].subspan(f, fragments_in_chunk); ck->num_values = std::accumulate(chunk_fragments.begin(), chunk_fragments.end(), 0, [](uint32_t l, auto r) { return l + r.num_values; }); ck->plain_data_size = std::accumulate( chunk_fragments.begin(), chunk_fragments.end(), 0, [](int sum, gpu::PageFragment frag) { return sum + frag.fragment_data_size; }); md.row_groups[global_r].columns[i].meta_data.type = parquet_columns[i].physical_type(); md.row_groups[global_r].columns[i].meta_data.encodings = {Encoding::PLAIN, Encoding::RLE}; md.row_groups[global_r].columns[i].meta_data.path_in_schema = parquet_columns[i].get_path_in_schema(); md.row_groups[global_r].columns[i].meta_data.codec = UNCOMPRESSED; md.row_groups[global_r].columns[i].meta_data.num_values = ck->num_values; } f += fragments_in_chunk; start_row += (uint32_t)md.row_groups[global_r].num_rows; } auto dict_info_owner = build_chunk_dictionaries(chunks, col_desc, num_rows, stream); for (uint32_t rg = 0, global_rg = global_rowgroup_base; rg < num_rowgroups; rg++, global_rg++) { for (int col = 0; col < num_columns; col++) { if (chunks.host_view()[rg][col].use_dictionary) { md.row_groups[global_rg].columns[col].meta_data.encodings.push_back( Encoding::PLAIN_DICTIONARY); } } } // Build chunk dictionaries and count pages if (num_chunks != 0) { init_page_sizes(chunks, col_desc, num_columns); } // Get the maximum page size across all chunks size_type max_page_uncomp_data_size = std::accumulate(chunks.host_view().flat_view().begin(), chunks.host_view().flat_view().end(), 0, [](uint32_t max_page_size, gpu::EncColumnChunk const& chunk) { return std::max(max_page_size, chunk.max_page_data_size); }); size_t max_page_comp_data_size = 0; if (compression_ != parquet::Compression::UNCOMPRESSED) { auto status = nvcompBatchedSnappyCompressGetMaxOutputChunkSize( max_page_uncomp_data_size, nvcompBatchedSnappyDefaultOpts, &max_page_comp_data_size); CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess, "Error in getting compressed size from nvcomp"); } // Initialize batches of rowgroups to encode (mainly to limit peak memory usage) std::vector<uint32_t> batch_list; uint32_t num_pages = 0; size_t max_bytes_in_batch = 1024 * 1024 * 1024; // 1GB - TBD: Tune this size_t max_uncomp_bfr_size = 0; size_t max_comp_bfr_size = 0; size_t max_chunk_bfr_size = 0; uint32_t max_pages_in_batch = 0; size_t bytes_in_batch = 0; size_t comp_bytes_in_batch = 0; for (uint32_t r = 0, groups_in_batch = 0, pages_in_batch = 0; r <= num_rowgroups; r++) { size_t rowgroup_size = 0; size_t comp_rowgroup_size = 0; if (r < num_rowgroups) { for (int i = 0; i < num_columns; i++) { gpu::EncColumnChunk* ck = &chunks[r][i]; ck->first_page = num_pages; num_pages += ck->num_pages; pages_in_batch += ck->num_pages; rowgroup_size += ck->bfr_size; ck->compressed_size = ck->ck_stat_size + ck->page_headers_size + max_page_comp_data_size * ck->num_pages; comp_rowgroup_size += ck->compressed_size; max_chunk_bfr_size = std::max(max_chunk_bfr_size, (size_t)std::max(ck->bfr_size, ck->compressed_size)); } } // TBD: We may want to also shorten the batch if we have enough pages (not just based on size) if ((r == num_rowgroups) || (groups_in_batch != 0 && bytes_in_batch + rowgroup_size > max_bytes_in_batch)) { max_uncomp_bfr_size = std::max(max_uncomp_bfr_size, bytes_in_batch); max_comp_bfr_size = 
std::max(max_comp_bfr_size, comp_bytes_in_batch); max_pages_in_batch = std::max(max_pages_in_batch, pages_in_batch); if (groups_in_batch != 0) { batch_list.push_back(groups_in_batch); groups_in_batch = 0; } bytes_in_batch = 0; comp_bytes_in_batch = 0; pages_in_batch = 0; } bytes_in_batch += rowgroup_size; comp_bytes_in_batch += comp_rowgroup_size; groups_in_batch++; } // Clear compressed buffer size if compression has been turned off if (compression_ == parquet::Compression::UNCOMPRESSED) { max_comp_bfr_size = 0; } // Initialize data pointers in batch uint32_t num_stats_bfr = (stats_granularity_ != statistics_freq::STATISTICS_NONE) ? num_pages + num_chunks : 0; rmm::device_buffer uncomp_bfr(max_uncomp_bfr_size, stream); rmm::device_buffer comp_bfr(max_comp_bfr_size, stream); rmm::device_uvector<gpu::EncPage> pages(num_pages, stream); // This contains stats for both the pages and the rowgroups. TODO: make them separate. rmm::device_uvector<statistics_chunk> page_stats(num_stats_bfr, stream); for (uint32_t b = 0, r = 0; b < (uint32_t)batch_list.size(); b++) { uint8_t* bfr = static_cast<uint8_t*>(uncomp_bfr.data()); uint8_t* bfr_c = static_cast<uint8_t*>(comp_bfr.data()); for (uint32_t j = 0; j < batch_list[b]; j++, r++) { for (int i = 0; i < num_columns; i++) { gpu::EncColumnChunk* ck = &chunks[r][i]; ck->uncompressed_bfr = bfr; ck->compressed_bfr = bfr_c; bfr += ck->bfr_size; bfr_c += ck->compressed_size; } } } if (num_pages != 0) { init_encoder_pages(chunks, col_desc, {pages.data(), pages.size()}, (num_stats_bfr) ? page_stats.data() : nullptr, (num_stats_bfr) ? frag_stats.data() : nullptr, max_page_comp_data_size, num_columns, num_pages, num_stats_bfr); } pinned_buffer<uint8_t> host_bfr{nullptr, cudaFreeHost}; // Encode row groups in batches for (uint32_t b = 0, r = 0, global_r = global_rowgroup_base; b < (uint32_t)batch_list.size(); b++) { // Count pages in this batch uint32_t rnext = r + batch_list[b]; uint32_t first_page_in_batch = chunks[r][0].first_page; uint32_t first_page_in_next_batch = (rnext < num_rowgroups) ? chunks[rnext][0].first_page : num_pages; uint32_t pages_in_batch = first_page_in_next_batch - first_page_in_batch; // device_span<gpu::EncPage> batch_pages{pages.data() + first_page_in_batch, } encode_pages( chunks, {pages.data(), pages.size()}, max_page_uncomp_data_size, pages_in_batch, first_page_in_batch, batch_list[b], r, (stats_granularity_ == statistics_freq::STATISTICS_PAGE) ? page_stats.data() : nullptr, (stats_granularity_ != statistics_freq::STATISTICS_NONE) ? page_stats.data() + num_pages : nullptr); std::vector<std::future<void>> write_tasks; for (; r < rnext; r++, global_r++) { for (auto i = 0; i < num_columns; i++) { gpu::EncColumnChunk* ck = &chunks[r][i]; uint8_t* dev_bfr; if (ck->is_compressed) { md.row_groups[global_r].columns[i].meta_data.codec = compression_; dev_bfr = ck->compressed_bfr; } else { dev_bfr = ck->uncompressed_bfr; } if (out_sink_->is_device_write_preferred(ck->compressed_size)) { // let the writer do what it wants to retrieve the data from the gpu. write_tasks.push_back( out_sink_->device_write_async(dev_bfr + ck->ck_stat_size, ck->compressed_size, stream)); // we still need to do a (much smaller) memcpy for the statistics. 
if (ck->ck_stat_size != 0) { md.row_groups[global_r].columns[i].meta_data.statistics_blob.resize(ck->ck_stat_size); CUDA_TRY( cudaMemcpyAsync(md.row_groups[global_r].columns[i].meta_data.statistics_blob.data(), dev_bfr, ck->ck_stat_size, cudaMemcpyDeviceToHost, stream.value())); stream.synchronize(); } } else { if (!host_bfr) { host_bfr = pinned_buffer<uint8_t>{[](size_t size) { uint8_t* ptr = nullptr; CUDA_TRY(cudaMallocHost(&ptr, size)); return ptr; }(max_chunk_bfr_size), cudaFreeHost}; } // copy the full data CUDA_TRY(cudaMemcpyAsync(host_bfr.get(), dev_bfr, ck->ck_stat_size + ck->compressed_size, cudaMemcpyDeviceToHost, stream.value())); stream.synchronize(); out_sink_->host_write(host_bfr.get() + ck->ck_stat_size, ck->compressed_size); if (ck->ck_stat_size != 0) { md.row_groups[global_r].columns[i].meta_data.statistics_blob.resize(ck->ck_stat_size); memcpy(md.row_groups[global_r].columns[i].meta_data.statistics_blob.data(), host_bfr.get(), ck->ck_stat_size); } } md.row_groups[global_r].total_byte_size += ck->compressed_size; md.row_groups[global_r].columns[i].meta_data.data_page_offset = current_chunk_offset + ((ck->use_dictionary) ? ck->dictionary_size : 0); md.row_groups[global_r].columns[i].meta_data.dictionary_page_offset = (ck->use_dictionary) ? current_chunk_offset : 0; md.row_groups[global_r].columns[i].meta_data.total_uncompressed_size = ck->bfr_size; md.row_groups[global_r].columns[i].meta_data.total_compressed_size = ck->compressed_size; current_chunk_offset += ck->compressed_size; } } for (auto const& task : write_tasks) { task.wait(); } } } std::unique_ptr<std::vector<uint8_t>> writer::impl::close( std::string const& column_chunks_file_path) { if (closed) { return nullptr; } closed = true; CompactProtocolWriter cpw(&buffer_); file_ender_s fendr; buffer_.resize(0); fendr.footer_len = static_cast<uint32_t>(cpw.write(md)); fendr.magic = parquet_magic; out_sink_->host_write(buffer_.data(), buffer_.size()); out_sink_->host_write(&fendr, sizeof(fendr)); out_sink_->flush(); // Optionally output raw file metadata with the specified column chunk file path if (column_chunks_file_path.length() > 0) { file_header_s fhdr = {parquet_magic}; buffer_.resize(0); buffer_.insert(buffer_.end(), reinterpret_cast<const uint8_t*>(&fhdr), reinterpret_cast<const uint8_t*>(&fhdr) + sizeof(fhdr)); for (auto& rowgroup : md.row_groups) { for (auto& col : rowgroup.columns) { col.file_path = column_chunks_file_path; } } fendr.footer_len = static_cast<uint32_t>(cpw.write(md)); buffer_.insert(buffer_.end(), reinterpret_cast<const uint8_t*>(&fendr), reinterpret_cast<const uint8_t*>(&fendr) + sizeof(fendr)); return std::make_unique<std::vector<uint8_t>>(std::move(buffer_)); } else { return {nullptr}; } } // Forward to implementation writer::writer(std::unique_ptr<data_sink> sink, parquet_writer_options const& options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : _impl(std::make_unique<impl>(std::move(sink), options, mode, stream, mr)) { } writer::writer(std::unique_ptr<data_sink> sink, chunked_parquet_writer_options const& options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : _impl(std::make_unique<impl>(std::move(sink), options, mode, stream, mr)) { } // Destructor within this translation unit writer::~writer() = default; // Forward to implementation void writer::write(table_view const& table) { _impl->write(table); } // Forward to implementation std::unique_ptr<std::vector<uint8_t>> writer::close(std::string const& 
column_chunks_file_path) { return _impl->close(column_chunks_file_path); } std::unique_ptr<std::vector<uint8_t>> writer::merge_rowgroup_metadata( const std::vector<std::unique_ptr<std::vector<uint8_t>>>& metadata_list) { std::vector<uint8_t> output; CompactProtocolWriter cpw(&output); FileMetaData md; md.row_groups.reserve(metadata_list.size()); for (const auto& blob : metadata_list) { CompactProtocolReader cpreader( blob.get()->data(), std::max<size_t>(blob.get()->size(), sizeof(file_ender_s)) - sizeof(file_ender_s)); cpreader.skip_bytes(sizeof(file_header_s)); // Skip over file header if (md.num_rows == 0) { cpreader.read(&md); } else { FileMetaData tmp; cpreader.read(&tmp); md.row_groups.insert(md.row_groups.end(), std::make_move_iterator(tmp.row_groups.begin()), std::make_move_iterator(tmp.row_groups.end())); md.num_rows += tmp.num_rows; } } // Reader doesn't currently populate column_order, so infer it here if (md.row_groups.size() != 0) { uint32_t num_columns = static_cast<uint32_t>(md.row_groups[0].columns.size()); md.column_order_listsize = (num_columns > 0 && md.row_groups[0].columns[0].meta_data.statistics_blob.size()) ? num_columns : 0; } // Thrift-encode the resulting output file_header_s fhdr; file_ender_s fendr; fhdr.magic = parquet_magic; output.insert(output.end(), reinterpret_cast<const uint8_t*>(&fhdr), reinterpret_cast<const uint8_t*>(&fhdr) + sizeof(fhdr)); fendr.footer_len = static_cast<uint32_t>(cpw.write(md)); fendr.magic = parquet_magic; output.insert(output.end(), reinterpret_cast<const uint8_t*>(&fendr), reinterpret_cast<const uint8_t*>(&fendr) + sizeof(fendr)); return std::make_unique<std::vector<uint8_t>>(std::move(output)); } } // namespace parquet } // namespace detail } // namespace io } // namespace cudf
d7b1409b63a9355b2e0a861854eab71b185250c9.hip
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------

/**
 * @file
 * test_topk.cu
 *
 * @brief Simple test driver program for computing Topk.
 */

#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <utility>
#include <iostream>
#include <cstdlib>
#include <algorithm>
#include <fstream>
#include <map>

#include "EvqueueManager.h"

// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>

// Graph construction utils
#include <gunrock/graphio/market.cuh>

// Degree Centrality includes
#include <gunrock/app/topk/topk_enactor.cuh>
#include <gunrock/app/topk/topk_problem.cuh>

// Operator includes
#include <gunrock/oprtr/filter/kernel.cuh>

using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::topk;

/******************************************************************************
 * Defines, constants, globals
 ******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;

/******************************************************************************
 * Housekeeping Routines
 ******************************************************************************/
void Usage()
{
  printf(
    "\ntest_topk <graph type> <graph type args> [--top=<K_value>] [--device=<device_index>] "
    "[--instrumented] [--quick] "
    "[--v]\n"
    "\n"
    "Graph types and args:\n"
    " market [<file>]\n"
    " Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
    " edges from stdin (or from the optionally-specified file).\n"
    " k value top K value.\n"
    " --device=<device_index> Set GPU device for running the graph primitive.\n"
    " --instrumented If set then kernels keep track of queue-search_depth\n"
    " and barrier duty (a relative indicator of load imbalance.)\n"
    " --quick If set will skip the CPU validation code.\n");
}

/**
 * @brief displays the top K results
 *
 */
template<
  typename VertexId,
  typename Value,
  typename SizeT>
void DisplaySolution(
  VertexId *h_node_id,
  Value    *h_degrees_i,
  Value    *h_degrees_o,
  SizeT    num_nodes)
{
  fflush(stdout);
  // at most display the first 100 results
  if (num_nodes > 100) num_nodes = 100;

  printf("==> top %d centrality nodes:\n", num_nodes);
  for (SizeT iter = 0; iter < num_nodes; ++iter)
    printf("%d %d %d\n", h_node_id[iter], h_degrees_i[iter], h_degrees_o[iter]);
}

/******************************************************************************
 * Degree Centrality Testing Routines
 *****************************************************************************/

/**
 * @brief A simple CPU-based reference TOPK implementation.
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 *
 * @param[in] graph Reference to the CSR graph we process on
 */
struct compare_second_only
{
  template <typename T1, typename T2>
  bool operator()(const std::pair<T1, T2>& p1, const std::pair<T1, T2>& p2)
  {
    return p1.second > p2.second;
  }
};

template<
  typename VertexId,
  typename Value,
  typename SizeT>
void SimpleReferenceTopK(
  const Csr<VertexId, Value, SizeT> &graph_original,
  const Csr<VertexId, Value, SizeT> &graph_reversed,
  VertexId *ref_node_id,
  Value    *ref_degrees,
  SizeT    top_nodes)
{
  printf("CPU reference test.\n");
  CpuTimer cpu_timer;

  // malloc degree centrality spaces
  Value *ref_degrees_original = (Value*)malloc(sizeof(Value) * graph_original.nodes);
  Value *ref_degrees_reversed = (Value*)malloc(sizeof(Value) * graph_reversed.nodes);

  // store reference output results
  std::vector< std::pair<int, int> > results;

  // calculations
  for (SizeT node = 0; node < graph_original.nodes; ++node)
  {
    ref_degrees_original[node] =
      graph_original.row_offsets[node+1] - graph_original.row_offsets[node];
    ref_degrees_reversed[node] =
      graph_reversed.row_offsets[node+1] - graph_reversed.row_offsets[node];
  }

  cpu_timer.Start();

  // add ingoing degrees and outgoing degrees together
  for (SizeT node = 0; node < graph_original.nodes; ++node)
  {
    ref_degrees_original[node] = ref_degrees_original[node] + ref_degrees_reversed[node];
    results.push_back( std::make_pair(node, ref_degrees_original[node]) );
  }

  // pair sort according to second elements - degree centrality
  std::stable_sort(results.begin(), results.end(), compare_second_only());

  for (SizeT itr = 0; itr < top_nodes; ++itr)
  {
    ref_node_id[itr] = results[itr].first;
    ref_degrees[itr] = results[itr].second;
  }

  cpu_timer.Stop();
  float elapsed_cpu = cpu_timer.ElapsedMillis();
  printf("==> CPU Degree Centrality finished in %lf msec.\n", elapsed_cpu);

  // clean up if necessary
  if (ref_degrees_original) { free(ref_degrees_original); }
  if (ref_degrees_reversed) { free(ref_degrees_reversed); }
  results.clear();
}

/**
 * @brief Run TopK tests
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 * @tparam INSTRUMENT
 *
 * @param[in] graph_original Reference to the CSR graph we process on
 * @param[in] graph_reversed Reference to the inversed CSR graph we process on
 * @param[in] args Reference to the command line arguments
 * @param[in] max_grid_size Maximum CTA occupancy
 * @param[in] num_gpus Number of GPUs
 * @param[in] top_nodes Number of nodes to process for Top-K algorithm
 * @param[in] context CudaContext for moderngpu library
 *
 */
template <
  typename VertexId,
  typename Value,
  typename SizeT,
  bool INSTRUMENT>
void RunTests(
  const Csr<VertexId, Value, SizeT> &graph_original,
  const Csr<VertexId, Value, SizeT> &graph_reversed,
  CommandLineArgs &args,
  int max_grid_size,
  int num_gpus,
  int top_nodes,
  CudaContext &context)
{
  // define the problem data structure for graph primitive
  typedef TOPKProblem<VertexId, SizeT, Value> Problem;

  // INSTRUMENT specifies whether we want to keep such statistical data
  // Allocate TopK enactor map
  TOPKEnactor<INSTRUMENT> topk_enactor(g_verbose);

  // allocate problem on GPU
  // create a pointer of the TOPKProblem type
  Problem *topk_problem = new Problem;

  // reset top_nodes if input k > total number of nodes
  if (top_nodes > graph_original.nodes) { top_nodes = graph_original.nodes; }

  // malloc host memory
  VertexId *h_node_id   = (VertexId*)malloc(sizeof(VertexId) * top_nodes);
  VertexId *ref_node_id = (VertexId*)malloc(sizeof(VertexId) * top_nodes);
  Value    *h_degrees_i = (Value*)malloc(sizeof(Value) * top_nodes);
  Value    *h_degrees_o = (Value*)malloc(sizeof(Value) * top_nodes);
  Value    *ref_degrees = (Value*)malloc(sizeof(Value) * top_nodes);

  // copy data from CPU to GPU
  // initialize data members in DataSlice for graph
  util::GRError(topk_problem->Init(
    g_stream_from_host,
    graph_original,
    graph_reversed,
    num_gpus), "Problem TOPK Initialization Failed", __FILE__, __LINE__);

  // perform topk degree centrality calculations
  GpuTimer gpu_timer; // Record the kernel running time

  struct timeval start, end;

  for (int iter = 0; iter < 1000; ++iter)
  {
    std::cout << "Iteration " << iter << std::endl;

    // reset values in DataSlice for graph
    util::GRError(topk_problem->Reset(
      topk_enactor.GetFrontierType()),
      "TOPK Problem Data Reset Failed", __FILE__, __LINE__);

    gpu_timer.Start();
    gettimeofday(&start, NULL);

    // launch topk enactor
    util::GRError(topk_enactor.template Enact<Problem>(
      topk_problem, top_nodes, max_grid_size),
      "TOPK Problem Enact Failed", __FILE__, __LINE__);

    gpu_timer.Stop();
    gettimeofday(&end, NULL);
    std::cerr << "[TOPK] ---- " << (end.tv_sec - start.tv_sec)*1000000+(end.tv_usec - start.tv_usec) << std::endl;
    EvqueueSynch();
  }

  float elapsed_gpu = gpu_timer.ElapsedMillis();
  printf("==> GPU TopK Degree Centrality finished in %lf msec.\n", elapsed_gpu);

  // copy out results back to CPU from GPU using Extract
  util::GRError(topk_problem->Extract(
    h_node_id,
    h_degrees_i,
    h_degrees_o,
    top_nodes), "TOPK Problem Data Extraction Failed", __FILE__, __LINE__);

  // display solution
  DisplaySolution(
    h_node_id,
    h_degrees_i,
    h_degrees_o,
    top_nodes);

  // validation
  SimpleReferenceTopK(
    graph_original,
    graph_reversed,
    ref_node_id,
    ref_degrees,
    top_nodes);

  int error_num = CompareResults(h_node_id, ref_node_id, top_nodes, true);
  if (error_num > 0)
  {
    printf("INCORRECT! %d error(s) occurred.\n", error_num);
  }
  printf("\n");

  // cleanup if necessary
  if (topk_problem) { delete topk_problem; }
  if (h_node_id)    { free(h_node_id);    }
  if (h_degrees_i)  { free(h_degrees_i);  }
  if (h_degrees_o)  { free(h_degrees_o);  }

  hipDeviceSynchronize();
}

/**
 * @brief RunTests entry
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 *
 * @param[in] graph_original Reference to the CSR graph we process on
 * @param[in] graph_reversed Reference to the inversed CSR graph we process on
 * @param[in] args Reference to the command line arguments
 * @param[in] top_nodes Number of nodes to process for Top-K algorithm
 * @param[in] context CudaContext for moderngpu library
 */
template <
  typename VertexId,
  typename Value,
  typename SizeT>
void RunTests(
  Csr<VertexId, Value, SizeT> &graph_original,
  Csr<VertexId, Value, SizeT> &graph_reversed,
  CommandLineArgs &args,
  SizeT top_nodes,
  CudaContext &context)
{
  bool instrumented  = false;
  int  max_grid_size = 0;
  int  num_gpus      = 1;

  instrumented = args.CheckCmdLineFlag("instrumented");
  g_quick      = args.CheckCmdLineFlag("quick");
  g_verbose    = args.CheckCmdLineFlag("v");

  if (instrumented)
  {
    RunTests<VertexId, Value, SizeT, true>(
      graph_original,
      graph_reversed,
      args,
      max_grid_size,
      num_gpus,
      top_nodes,
      context);
  }
  else
  {
    RunTests<VertexId, Value, SizeT, false>(
      graph_original,
      graph_reversed,
      args,
      max_grid_size,
      num_gpus,
      top_nodes,
      context);
  }
}

/******************************************************************************
 * Main
 ******************************************************************************/
int main(int argc, char** argv)
{
  EvqueueCreate(2);

  CommandLineArgs args(argc, argv);

  if ((argc < 2) || (args.CheckCmdLineFlag("help")))
  {
    Usage();
    return 1;
  }

  //DeviceInit(args);
  //hipSetDeviceFlags(hipDeviceMapHost);
  int dev = 0;
  int top_nodes;
  args.GetCmdLineArgument("device", dev);
  args.GetCmdLineArgument("top", top_nodes);
  mgpu::ContextPtr context = mgpu::CreateCudaDevice(dev);

  //srand(0); // Presently deterministic
  //srand(time(NULL));

  // Parse graph-construction params
  g_undirected = false;
  std::string graph_type = argv[1];
  int flags = args.ParsedArgc();
  int graph_args = argc - flags - 1;

  if (graph_args < 1)
  {
    Usage();
    return 1;
  }

  //
  // Construct graph and perform
  //
  if (graph_type == "market")
  {
    // Matrix-market coordinate-formatted graph file
    typedef int VertexId;  //!< Use as the node identifier type
    typedef int Value;     //!< Use as the value type
    typedef int SizeT;     //!< Use as the graph size type

    Csr<VertexId, Value, SizeT> csr_original(false);
    Csr<VertexId, Value, SizeT> csr_reversed(false); // Default value for stream_from_host is false

    if (graph_args < 1)
    {
      Usage();
      return 1;
    }

    char *market_filename = (graph_args == 2) ? argv[2] : NULL;

    // BuildMarketGraph() reads a mtx file into CSR data structure
    // Template argument = true because the graph has edge weights
    // read in non-inversed graph
    if (graphio::BuildMarketGraph<true>(
      market_filename,
      csr_original,
      g_undirected,
      false) != 0) // original graph
    {
      return 1;
    }

    // read in inversed graph
    if (graphio::BuildMarketGraph<true>(
      market_filename,
      csr_reversed,
      g_undirected,
      true) != 0) // reversed graph
    {
      return 1;
    }

    //csr_original.DisplayGraph();
    //csr_reversed.DisplayGraph();

    // run gpu tests
    RunTests(csr_original, csr_reversed, args, top_nodes, *context);
  }
  else
  {
    // unknown graph type
    fprintf(stderr, "Unspecified graph type\n");
    return 1;
  }

  EvqueueDestroy();
  return 0;
}

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
d7b1409b63a9355b2e0a861854eab71b185250c9.cu
// ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_topk.cu * * @brief Simple test driver program for computing Topk. */ #include <stdio.h> #include <string> #include <deque> #include <vector> #include <utility> #include <iostream> #include <cstdlib> #include <algorithm> #include <fstream> #include <map> #include "EvqueueManager.h" // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // Graph construction utils #include <gunrock/graphio/market.cuh> // Degree Centrality includes #include <gunrock/app/topk/topk_enactor.cuh> #include <gunrock/app/topk/topk_problem.cuh> // Operator includes #include <gunrock/oprtr/filter/kernel.cuh> using namespace gunrock; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::topk; /****************************************************************************** * Defines, constants, globals ******************************************************************************/ bool g_verbose; bool g_undirected; bool g_quick; bool g_stream_from_host; /****************************************************************************** * Housekeeping Routines ******************************************************************************/ void Usage() { printf( "\ntest_topk <graph type> <graph type args> [--top=<K_value>] [--device=<device_index>] " "[--instrumented] [--quick] " "[--v]\n" "\n" "Graph types and args:\n" " market [<file>]\n" " Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n" " edges from stdin (or from the optionally-specified file).\n" " k value top K value.\n" " --device=<device_index> Set GPU device for running the graph primitive.\n" " --instrumented If set then kernels keep track of queue-search_depth\n" " and barrier duty (a relative indicator of load imbalance.)\n" " --quick If set will skip the CPU validation code.\n"); } /** * @brief displays the top K results * */ template< typename VertexId, typename Value, typename SizeT> void DisplaySolution( VertexId *h_node_id, Value *h_degrees_i, Value *h_degrees_o, SizeT num_nodes) { fflush(stdout); // at most display the first 100 results if (num_nodes > 100) num_nodes = 100; printf("==> top %d centrality nodes:\n", num_nodes); for (SizeT iter = 0; iter < num_nodes; ++iter) printf("%d %d %d\n", h_node_id[iter], h_degrees_i[iter], h_degrees_o[iter]); } /****************************************************************************** * Degree Centrality Testing Routines *****************************************************************************/ /** * @brief A simple CPU-based reference TOPK implementation. * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Reference to the CSR graph we process on */ struct compare_second_only { template <typename T1, typename T2> bool operator()(const std::pair<T1, T2>& p1, const std::pair<T1, T2>& p2) { return p1.second > p2. 
second; } }; template< typename VertexId, typename Value, typename SizeT> void SimpleReferenceTopK( const Csr<VertexId, Value, SizeT> &graph_original, const Csr<VertexId, Value, SizeT> &graph_reversed, VertexId *ref_node_id, Value *ref_degrees, SizeT top_nodes) { printf("CPU reference test.\n"); CpuTimer cpu_timer; // malloc degree centrality spaces Value *ref_degrees_original = (Value*)malloc(sizeof(Value) * graph_original.nodes); Value *ref_degrees_reversed = (Value*)malloc(sizeof(Value) * graph_reversed.nodes); // store reference output results std::vector< pair<int, int> > results; // calculations for (SizeT node = 0; node < graph_original.nodes; ++node) { ref_degrees_original[node] = graph_original.row_offsets[node+1] - graph_original.row_offsets[node]; ref_degrees_reversed[node] = graph_reversed.row_offsets[node+1] - graph_reversed.row_offsets[node]; } cpu_timer.Start(); // add ingoing degrees and outgoing degrees together for (SizeT node = 0; node < graph_original.nodes; ++node) { ref_degrees_original[node] = ref_degrees_original[node] + ref_degrees_reversed[node]; results.push_back( std::make_pair (node, ref_degrees_original[node]) ); } // pair sort according to second elements - degree centrality std::stable_sort(results.begin(), results.end(), compare_second_only()); for (SizeT itr = 0; itr < top_nodes; ++itr) { ref_node_id[itr] = results[itr].first; ref_degrees[itr] = results[itr].second; } cpu_timer.Stop(); float elapsed_cpu = cpu_timer.ElapsedMillis(); printf("==> CPU Degree Centrality finished in %lf msec.\n", elapsed_cpu); // clean up if neccessary if (ref_degrees_original) { free(ref_degrees_original); } if (ref_degrees_reversed) { free(ref_degrees_reversed); } results.clear(); } /** * @brief Run TopK tests * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * * @param[in] graph_original Reference to the CSR graph we process on * @param[in] graph_reversed Reference to the inversed CSR graph we process on * @param[in] args Reference to the command line arguments * @param[in] max_grid_size Maximum CTA occupancy * @param[in] num_gpus Number of GPUs * @param[in] top_nodes Number of nodes to process for Top-K algorithm * @param[in] context CudaContext for moderngpu library * */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT> void RunTests( const Csr<VertexId, Value, SizeT> &graph_original, const Csr<VertexId, Value, SizeT> &graph_reversed, CommandLineArgs &args, int max_grid_size, int num_gpus, int top_nodes, CudaContext &context) { // define the problem data structure for graph primitive typedef TOPKProblem<VertexId, SizeT, Value> Problem; // INSTRUMENT specifies whether we want to keep such statistical data // Allocate TopK enactor map TOPKEnactor<INSTRUMENT> topk_enactor(g_verbose); // allocate problem on GPU // create a pointer of the TOPKProblem type Problem *topk_problem = new Problem; // reset top_nodes if input k > total number of nodes if (top_nodes > graph_original.nodes) { top_nodes = graph_original.nodes; } // malloc host memory VertexId *h_node_id = (VertexId*)malloc(sizeof(VertexId) * top_nodes); VertexId *ref_node_id = (VertexId*)malloc(sizeof(VertexId) * top_nodes); Value *h_degrees_i = (Value*)malloc(sizeof(Value) * top_nodes); Value *h_degrees_o = (Value*)malloc(sizeof(Value) * top_nodes); Value *ref_degrees = (Value*)malloc(sizeof(Value) * top_nodes); // copy data from CPU to GPU // initialize data members in DataSlice for graph util::GRError(topk_problem->Init( g_stream_from_host, graph_original, 
graph_reversed, num_gpus), "Problem TOPK Initialization Failed", __FILE__, __LINE__); // perform topk degree centrality calculations GpuTimer gpu_timer; // Record the kernel running time struct timeval start, end; for (int iter = 0; iter < 1000; ++iter) { std::cout << "Iteration " << iter << std::endl; // reset values in DataSlice for graph util::GRError(topk_problem->Reset( topk_enactor.GetFrontierType()), "TOPK Problem Data Reset Failed", __FILE__, __LINE__); gpu_timer.Start(); gettimeofday(&start, NULL); // launch topk enactor util::GRError(topk_enactor.template Enact<Problem>( topk_problem, top_nodes, max_grid_size), "TOPK Problem Enact Failed", __FILE__, __LINE__); gpu_timer.Stop(); gettimeofday(&end, NULL); std::cerr << "[TOPK] ---- " << (end.tv_sec - start.tv_sec)*1000000+(end.tv_usec - start.tv_usec) << std::endl; EvqueueSynch(); } float elapsed_gpu = gpu_timer.ElapsedMillis(); printf("==> GPU TopK Degree Centrality finished in %lf msec.\n", elapsed_gpu); // copy out results back to CPU from GPU using Extract util::GRError(topk_problem->Extract( h_node_id, h_degrees_i, h_degrees_o, top_nodes), "TOPK Problem Data Extraction Failed", __FILE__, __LINE__); // display solution DisplaySolution( h_node_id, h_degrees_i, h_degrees_o, top_nodes); // validation SimpleReferenceTopK( graph_original, graph_reversed, ref_node_id, ref_degrees, top_nodes); int error_num = CompareResults(h_node_id, ref_node_id, top_nodes, true); if (error_num > 0) { printf("INCOREECT! %d error(s) occured. \n", error_num); } printf("\n"); // cleanup if neccessary if (topk_problem) { delete topk_problem; } if (h_node_id) { free(h_node_id); } if (h_degrees_i) { free(h_degrees_i); } if (h_degrees_o) { free(h_degrees_o); } cudaDeviceSynchronize(); } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph_original Reference to the CSR graph we process on * @param[in] graph_reversed Reference to the inversed CSR graph we process on * @param[in] args Reference to the command line arguments * @param[in] top_nodes Number of nodes to process for Top-K algorithm * @param[in] context CudaContext for moderngpu library */ template < typename VertexId, typename Value, typename SizeT> void RunTests( Csr<VertexId, Value, SizeT> &graph_original, Csr<VertexId, Value, SizeT> &graph_reversed, CommandLineArgs &args, SizeT top_nodes, CudaContext &context) { bool instrumented = false; int max_grid_size = 0; int num_gpus = 1; instrumented = args.CheckCmdLineFlag("instrumented"); g_quick = args.CheckCmdLineFlag("quick"); g_verbose = args.CheckCmdLineFlag("v"); if (instrumented) { RunTests<VertexId, Value, SizeT, true>( graph_original, graph_reversed, args, max_grid_size, num_gpus, top_nodes, context); } else { RunTests<VertexId, Value, SizeT, false>( graph_original, graph_reversed, args, max_grid_size, num_gpus, top_nodes, context); } } /****************************************************************************** * Main ******************************************************************************/ int main(int argc, char** argv) { EvqueueCreate(2); CommandLineArgs args(argc, argv); if ((argc < 2) || (args.CheckCmdLineFlag("help"))) { Usage(); return 1; } //DeviceInit(args); //cudaSetDeviceFlags(cudaDeviceMapHost); int dev = 0; int top_nodes; args.GetCmdLineArgument("device", dev); args.GetCmdLineArgument("top", top_nodes); mgpu::ContextPtr context = mgpu::CreateCudaDevice(dev); //srand(0); // Presently deterministic //srand(time(NULL)); // Parse graph-contruction params g_undirected = false; 
std::string graph_type = argv[1]; int flags = args.ParsedArgc(); int graph_args = argc - flags - 1; if (graph_args < 1) { Usage(); return 1; } // // Construct graph and perform // if (graph_type == "market") { // Matrix-market coordinate-formatted graph file typedef int VertexId; //!< Use as the node identifier type typedef int Value; //!< Use as the value type typedef int SizeT; //!< Use as the graph size type Csr<VertexId, Value, SizeT> csr_original(false); Csr<VertexId, Value, SizeT> csr_reversed(false); // Default value for stream_from_host is false if (graph_args < 1) { Usage(); return 1; } char *market_filename = (graph_args == 2) ? argv[2] : NULL; // BuildMarketGraph() reads a mtx file into CSR data structure // Template argumet = true because the graph has edge weights // read in non-inversed graph if (graphio::BuildMarketGraph<true>( market_filename, csr_original, g_undirected, false) != 0) // original graph { return 1; } // read in inversed graph if (graphio::BuildMarketGraph<true>( market_filename, csr_reversed, g_undirected, true) != 0) // reversed graph { return 1; } //csr_original.DisplayGraph(); //csr_reversed.DisplayGraph(); // run gpu tests RunTests(csr_original, csr_reversed, args, top_nodes, *context); } else { // unknown graph type fprintf(stderr, "Unspecified graph type\n"); return 1; } EvqueueDestroy(); return 0; } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
f6a9aa7baa993b3b70766fc88c14ed9c6c1957fb.hip
// !!! This is a file automatically generated by hipify!!! #include <cfloat> #include <chrono> #include <hip/hip_runtime_api.h> #include <iostream> using namespace std; /////////////////////////////////////////////////////////////////////////////////////////////////////////// hipError_t SAFE_CALL (hipError_t result) { if(result != hipSuccess) { printf("CUDA error: %s at call #CallInstruction\n", hipGetErrorString(result)); throw "error in CUDA API function, aborting..."; } return result; } hipError_t SAFE_KERNEL_CALL (hipError_t result) { if(result != hipSuccess) { printf("CUDA error in kernel launch: %s at kernel #KernelCallInstruction\n", hipGetErrorString(result)); throw "error in CUDA kernel launch, aborting..."; } result = hipDeviceSynchronize(); if(result != hipSuccess) { printf("CUDA error in kernel execution: %s at kernel \"#KernelCallInstruction\"\n", hipGetErrorString(result)); throw "error in CUDA kernel execution, aborting..."; } return result; }; /////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void gather(int *ptrs, int *connections, int *out_ids, int vertices_count, int *data, int *result) { const long long src_id = (blockIdx.x * blockDim.x + threadIdx.x) / 32; if (src_id < vertices_count) { const int first_edge_ptr = ptrs[src_id]; const int connections_count = connections[src_id]; for (register int cur_edge = threadIdx.x % 32; cur_edge < connections_count; cur_edge += 32) { int dst_id = out_ids[first_edge_ptr + cur_edge]; int val = data[dst_id]; result[first_edge_ptr + cur_edge] = val; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////// // void __global__ gather(int *ptrs, int *connections, int *outgoing_ids, int vertices_count, int *data, int *result) // { // const long long src_id = blockIdx.x * blockDim.x + threadIdx.x; // if (src_id < vertices_count) // { // const int first_edge_ptr = ptrs[src_id]; // const int connections_count = connections[src_id]; // //connections_count = ptrs[src_id + 1] - ptrs[src_id]; // for(register int cur_edge = 0; cur_edge < connections_count; cur_edge++) // { // // first_edge_ptr + cur_edge - // int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; // int val = data[dst_id]; // result[first_edge_ptr + cur_edge] = val; // // , : // /* BFS // int src_level = data[src_id]; // int dst_level = data[dst_id]; // if((src_level == current_level) && (dst_level == UNVISITED_VERTEX)) // { // data[dst_id] = current_level + 1; // } // */ // /* SSSP // float weight = outgoing_weights[first_edge_ptr + cur_edge]; // float src_weight = data[src_id]; // float dst_weight = data[dst_id]; // if(dst_weight > src_weight + weight) // { // data[dst_id] = src_weight + weight; // } // */ // } // } // } int main() { int vertices_count = 1024*1024; int *ptrs = new int[vertices_count]; int *data = new int[vertices_count]; int *connections = new int[vertices_count]; int pos = 0; for(int i = 0; i < vertices_count; i++) // TODO (bonus) "" { ptrs[i] = pos; connections[i] = 16 + rand()%32; pos += connections[i]; data[i] = rand(); } int edges_count = pos; int *outgoing_ids = new int[edges_count]; int *result = new int[edges_count]; for(int i = 0; i < edges_count; i++) { outgoing_ids[i] = rand()%vertices_count; } int *dev_ptrs; int *dev_connections; int *dev_outgoing_ids; int *dev_data; int *dev_result; hipMalloc((void**)&dev_ptrs, vertices_count*sizeof(int)); hipMalloc((void**)&dev_connections, vertices_count*sizeof(int)); 
hipMalloc((void**)&dev_data, vertices_count*sizeof(int)); hipMalloc((void**)&dev_outgoing_ids, edges_count*sizeof(int)); hipMalloc((void**)&dev_result, edges_count*sizeof(int)); SAFE_CALL( hipMemcpy(dev_ptrs, ptrs, vertices_count * sizeof(int), hipMemcpyHostToDevice) ); SAFE_CALL( hipMemcpy(dev_connections, connections, vertices_count * sizeof(int), hipMemcpyHostToDevice) ); SAFE_CALL( hipMemcpy(dev_data, data, vertices_count * sizeof(int), hipMemcpyHostToDevice) ); SAFE_CALL( hipMemcpy(dev_outgoing_ids, outgoing_ids, edges_count * sizeof(int), hipMemcpyHostToDevice) ); dim3 compute_threads(1024); dim3 compute_blocks( 32*(vertices_count - 1) / compute_threads.x + 1); for (int i = 0; i < 5; i++) { auto start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( gather), dim3(compute_blocks), dim3(compute_threads), 0, 0, dev_ptrs, dev_connections, dev_outgoing_ids, vertices_count, dev_data, dev_result); auto end = std::chrono::steady_clock::now(); // TODO ? std::chrono::duration<double> elapsed_seconds = end-start; cout << "time: " << (elapsed_seconds.count())*1000.0 << " ms" << endl; cout << "bandwidth: " << 3.0*sizeof(int)*edges_count/((elapsed_seconds.count())*1e9) << " GB/s" << endl << endl; } int *copy_device_result = new int[edges_count]; // TODO copy SAFE_CALL(hipMemcpy(copy_device_result, dev_result, edges_count * sizeof(int), hipMemcpyDeviceToHost)); for (int src_id = 0; src_id < vertices_count; src_id++) { const int first_edge_ptr = ptrs[src_id]; const int connections_count = connections[src_id]; for (register int cur_edge = 0; cur_edge < connections_count; cur_edge++) { int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; int val = data[dst_id]; result[first_edge_ptr + cur_edge] = val; } } // TODO check int errors_count = 0; for (int i = 0; i < edges_count; i++) { if (result[i] != copy_device_result[i]) errors_count++; } cout << errors_count << endl; // TODO 3 ? // TODO , // TODO // TODO (bonus) BFS ( ) hipFree(dev_data); hipFree(dev_ptrs); hipFree(dev_connections); hipFree(dev_result); hipFree(dev_outgoing_ids); delete[]result; delete[]data; delete[]ptrs; delete[]outgoing_ids; delete[]connections; return 0; }
f6a9aa7baa993b3b70766fc88c14ed9c6c1957fb.cu
#include <cfloat> #include <chrono> #include <cuda_profiler_api.h> #include <iostream> using namespace std; /////////////////////////////////////////////////////////////////////////////////////////////////////////// cudaError_t SAFE_CALL (cudaError_t result) { if(result != cudaSuccess) { printf("CUDA error: %s at call #CallInstruction\n", cudaGetErrorString(result)); throw "error in CUDA API function, aborting..."; } return result; } cudaError_t SAFE_KERNEL_CALL (cudaError_t result) { if(result != cudaSuccess) { printf("CUDA error in kernel launch: %s at kernel #KernelCallInstruction\n", cudaGetErrorString(result)); throw "error in CUDA kernel launch, aborting..."; } result = cudaDeviceSynchronize(); if(result != cudaSuccess) { printf("CUDA error in kernel execution: %s at kernel \"#KernelCallInstruction\"\n", cudaGetErrorString(result)); throw "error in CUDA kernel execution, aborting..."; } return result; }; /////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void gather(int *ptrs, int *connections, int *out_ids, int vertices_count, int *data, int *result) { const long long src_id = (blockIdx.x * blockDim.x + threadIdx.x) / 32; if (src_id < vertices_count) { const int first_edge_ptr = ptrs[src_id]; const int connections_count = connections[src_id]; for (register int cur_edge = threadIdx.x % 32; cur_edge < connections_count; cur_edge += 32) { int dst_id = out_ids[first_edge_ptr + cur_edge]; int val = data[dst_id]; result[first_edge_ptr + cur_edge] = val; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////// // void __global__ gather(int *ptrs, int *connections, int *outgoing_ids, int vertices_count, int *data, int *result) // { // const long long src_id = blockIdx.x * blockDim.x + threadIdx.x; // if (src_id < vertices_count) // { // const int first_edge_ptr = ptrs[src_id]; // const int connections_count = connections[src_id]; // //connections_count = ptrs[src_id + 1] - ptrs[src_id]; // for(register int cur_edge = 0; cur_edge < connections_count; cur_edge++) // { // // first_edge_ptr + cur_edge - индекс текущего ребра в массивах // int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; // int val = data[dst_id]; // result[first_edge_ptr + cur_edge] = val; // // данную программу можно легко переделать во многие графовые алгоритмы, например: // /* BFS // int src_level = data[src_id]; // int dst_level = data[dst_id]; // if((src_level == current_level) && (dst_level == UNVISITED_VERTEX)) // { // data[dst_id] = current_level + 1; // } // */ // /* SSSP // float weight = outgoing_weights[first_edge_ptr + cur_edge]; // float src_weight = data[src_id]; // float dst_weight = data[dst_id]; // if(dst_weight > src_weight + weight) // { // data[dst_id] = src_weight + weight; // } // */ // } // } // } int main() { int vertices_count = 1024*1024; int *ptrs = new int[vertices_count]; int *data = new int[vertices_count]; int *connections = new int[vertices_count]; int pos = 0; for(int i = 0; i < vertices_count; i++) // TODO (bonus) граф с несколькими "большими" вершинами { ptrs[i] = pos; connections[i] = 16 + rand()%32; pos += connections[i]; data[i] = rand(); } int edges_count = pos; int *outgoing_ids = new int[edges_count]; int *result = new int[edges_count]; for(int i = 0; i < edges_count; i++) { outgoing_ids[i] = rand()%vertices_count; } int *dev_ptrs; int *dev_connections; int *dev_outgoing_ids; int *dev_data; int *dev_result; cudaMalloc((void**)&dev_ptrs, 
vertices_count*sizeof(int)); cudaMalloc((void**)&dev_connections, vertices_count*sizeof(int)); cudaMalloc((void**)&dev_data, vertices_count*sizeof(int)); cudaMalloc((void**)&dev_outgoing_ids, edges_count*sizeof(int)); cudaMalloc((void**)&dev_result, edges_count*sizeof(int)); SAFE_CALL( cudaMemcpy(dev_ptrs, ptrs, vertices_count * sizeof(int), cudaMemcpyHostToDevice) ); SAFE_CALL( cudaMemcpy(dev_connections, connections, vertices_count * sizeof(int), cudaMemcpyHostToDevice) ); SAFE_CALL( cudaMemcpy(dev_data, data, vertices_count * sizeof(int), cudaMemcpyHostToDevice) ); SAFE_CALL( cudaMemcpy(dev_outgoing_ids, outgoing_ids, edges_count * sizeof(int), cudaMemcpyHostToDevice) ); dim3 compute_threads(1024); dim3 compute_blocks( 32*(vertices_count - 1) / compute_threads.x + 1); for (int i = 0; i < 5; i++) { auto start = std::chrono::steady_clock::now(); gather<<<compute_blocks, compute_threads>>>(dev_ptrs, dev_connections, dev_outgoing_ids, vertices_count, dev_data, dev_result); auto end = std::chrono::steady_clock::now(); // TODO почему работает данный замер веремени? std::chrono::duration<double> elapsed_seconds = end-start; cout << "time: " << (elapsed_seconds.count())*1000.0 << " ms" << endl; cout << "bandwidth: " << 3.0*sizeof(int)*edges_count/((elapsed_seconds.count())*1e9) << " GB/s" << endl << endl; } int *copy_device_result = new int[edges_count]; // TODO copy SAFE_CALL(cudaMemcpy(copy_device_result, dev_result, edges_count * sizeof(int), cudaMemcpyDeviceToHost)); for (int src_id = 0; src_id < vertices_count; src_id++) { const int first_edge_ptr = ptrs[src_id]; const int connections_count = connections[src_id]; for (register int cur_edge = 0; cur_edge < connections_count; cur_edge++) { int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; int val = data[dst_id]; result[first_edge_ptr + cur_edge] = val; } } // TODO check int errors_count = 0; for (int i = 0; i < edges_count; i++) { if (result[i] != copy_device_result[i]) errors_count++; } cout << errors_count << endl; // TODO какие 3 недостатка у текущей версии ядра? // TODO отпрофилировать текущую версию, сделать выводы о её производитлеьности // TODO сделать оптимизированную версию ядра // TODO (bonus) реализовать базовую версию BFS алгоритма (выделить структуры данных и реализовать сам алгоритм) cudaFree(dev_data); cudaFree(dev_ptrs); cudaFree(dev_connections); cudaFree(dev_result); cudaFree(dev_outgoing_ids); delete[]result; delete[]data; delete[]ptrs; delete[]outgoing_ids; delete[]connections; return 0; }
7cf486423193fd506748e107f08e2ba5284da128.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/native/SortingUtils.h> #include <assert.h> #include <c10/macros/Macros.h> #include <stdlib.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <THH/THHDeviceUtils.cuh> // only for THCRoundUp? #include <THH/THHNumerics.cuh> #include <THH/THHScanUtils.cuh> #include <THH/THHTensorMathReduce.cuh> // AddOp #include <ATen/native/hip/SortingCommon.cuh> #include <ATen/native/hip/SortingRadixSelect.cuh> #include <ATen/NamedTensorUtils.h> namespace at { namespace native { namespace { template <typename scalar_t, typename index_t, int Dim> __global__ void gatherKthValue( cuda::detail::TensorInfo<scalar_t, index_t> input, index_t inputSliceSize, index_t k, index_t numInputSlices, index_t inputWithinSliceStride, cuda::detail::TensorInfo<scalar_t, index_t> kthValue, cuda::detail::TensorInfo<int64_t, index_t> indices) { // Indices are limited to integer fp precision, so counts can fit in // int32, regardless of index_t __shared__ int smem[C10_WARP_SIZE]; // one per each warp, up to warp limit index_t slice = getLinearBlockId<index_t>(); if (slice >= numInputSlices) { return; } // Find the start offset for our slice index_t sliceStartIndex = cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, input); index_t kthValueSliceStartIndex = cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, kthValue); index_t indicesSliceStartIndex = cuda::detail::IndexToOffset<int64_t, index_t, Dim>::get(slice, indices); scalar_t* inputSliceStart = &input.data[sliceStartIndex]; scalar_t* kthValueSliceStart = &kthValue.data[kthValueSliceStartIndex]; int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex]; // Find the k-th highest element in our input scalar_t kValue = static_cast<scalar_t>(0); radixSelect< scalar_t, typename TopKTypeConfig<scalar_t>::RadixType, index_t, false>( inputSliceStart, k, inputSliceSize, inputWithinSliceStride, smem, &kValue); // Find the index of the k-th highest element index_t kValueIndex = 0; bool foundKValue = false; for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) { bool inRange = (i < inputSliceSize); scalar_t v = inRange ? 
doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<scalar_t>(0); bool isKValue = inRange && THCNumerics<scalar_t>::eq_with_nan(v, kValue); if (isKValue) { kValueIndex = i; foundKValue = true; break; } } if (foundKValue) { kthValueSliceStart[0] = kValue; indicesSliceStart[0] = kValueIndex; } } struct KthValueLauncher { int64_t k; KthValueLauncher(int64_t k) : k(k) {} template <typename scalar_t, typename index_t, int all_dims> inline void launch( cuda::detail::TensorInfo<scalar_t, index_t> values_info, int collapse_values_dim, cuda::detail::TensorInfo<int64_t, index_t> indices_info, int collapse_indices_dim, cuda::detail::TensorInfo<scalar_t, index_t> self_info, int collapse_self_dim, int64_t num_slices, int64_t slice_size) { dim3 grid; if (!getGridFromTiles(num_slices, grid)) { AT_ERROR("slices are too many"); } dim3 block( ::min(THCRoundUp(slice_size, (int64_t)C10_WARP_SIZE), (int64_t)1024)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( gatherKthValue<scalar_t, index_t, all_dims>), dim3(grid), dim3(block), 0, stream, self_info, slice_size, k, num_slices, /* The actual dimension that the k-selection is running in */ /* may have changed from collapseDims() */ self_info.strides[collapse_self_dim], values_info, indices_info); } }; template <typename scalar_t> void kthvalue_cuda_template( Tensor& values, Tensor& indices, const Tensor& self, int64_t k, int64_t dim_, bool keepdim) { int64_t dim = maybe_wrap_dim(dim_, self.dim()); int64_t slicesize = self.size(dim); // FIXME: This seems bogus, I only do this because it was the old behaviour. // The reductions are fine, as long as the axis being reduced along // isn't of 0 elements (and the output has elements). TORCH_CHECK( self.numel() > 0, "cannot perform reduction function kthvalue", " on tensor with no elements because the operation does not have an identity"); TORCH_CHECK(k >= 1 && k <= slicesize, "selected number k out of range"); _reduction_with_indices_allocate_or_resize_output( values, indices, self, dim, keepdim); if (self.dim() == 0 && self.numel() == 1) { values.copy_(self); indices.zero_(); return; } TORCH_CHECK( self.dim() <= MAX_TENSORINFO_DIMS, "cannot operate on more than ", MAX_TENSORINFO_DIMS, " dimensions"); // Based on required index size, run the algorithm with the // appropriate index type if (cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(values) && cuda::detail::canUse32BitIndexMath(indices)) { run_launcher<scalar_t, uint32_t>( values, indices, self, dim, KthValueLauncher(k)); } else { run_launcher<scalar_t, uint64_t>( values, indices, self, dim, KthValueLauncher(k)); } if (!keepdim) { values.squeeze_(dim); indices.squeeze_(dim); } AT_CUDA_CHECK(hipGetLastError()); } // this does not reduce to median with dim because we don't want to copy twice template <typename scalar_t> Tensor median_cuda_template(const Tensor& self) { TORCH_CHECK(self.numel() > 0, "median cannot be called with empty tensor"); if (self.dim() == 0 && self.numel() == 1) { return self.clone(at::MemoryFormat::Contiguous); } auto self_copy = self.clone(at::MemoryFormat::Contiguous).view(-1); auto values = at::empty({1}, self.options()); auto indices = at::empty({1}, self.options().dtype(kLong)); TORCH_CHECK( self.dim() <= MAX_TENSORINFO_DIMS, "cannot operate on more than ", MAX_TENSORINFO_DIMS, " dimensions"); // Based on required index size, run the algorithm with the // appropriate index type if (cuda::detail::canUse32BitIndexMath(self) && 
cuda::detail::canUse32BitIndexMath(values) && cuda::detail::canUse32BitIndexMath(indices)) { run_launcher<scalar_t, uint32_t>( values, indices, self_copy, 0, KthValueLauncher((self_copy.size(0) + 1) / 2)); // KthValue is 1-based } else { run_launcher<scalar_t, uint64_t>( values, indices, self_copy, 0, KthValueLauncher((self_copy.size(0) + 1) / 2)); // KthValue is 1-based } return values.view({}); } } // namespace static std::tuple<Tensor&, Tensor&> kthvalue_out_impl_cuda( Tensor& values, Tensor& indices, const Tensor& self, int64_t k, int64_t dim, bool keepdim) { AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "kthvalue_cuda", [&] { kthvalue_cuda_template<scalar_t>(values, indices, self, k, dim, keepdim); }); return std::forward_as_tuple(values, indices); } std::tuple<Tensor&, Tensor&> kthvalue_out_cuda( Tensor& values, Tensor& indices, const Tensor& self, int64_t k, int64_t dim, bool keepdim) { auto result = [&]() { NoNamesGuard guard; return kthvalue_out_impl_cuda(values, indices, self, k, dim, keepdim); }(); namedinference::propagate_names_for_reduction(values, self, dim, keepdim); namedinference::propagate_names_for_reduction(indices, self, dim, keepdim); return result; } Tensor median_cuda(const Tensor& self) { NoNamesGuard guard; return AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "median", [&] { return median_cuda_template<scalar_t>(self); }); } } // namespace native } // namespace at
7cf486423193fd506748e107f08e2ba5284da128.cu
#include <ATen/ATen.h> #include <ATen/native/SortingUtils.h> #include <assert.h> #include <c10/macros/Macros.h> #include <stdlib.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <THC/THCDeviceUtils.cuh> // only for THCRoundUp? #include <THC/THCNumerics.cuh> #include <THC/THCScanUtils.cuh> #include <THC/THCTensorMathReduce.cuh> // AddOp #include <ATen/native/cuda/SortingCommon.cuh> #include <ATen/native/cuda/SortingRadixSelect.cuh> #include <ATen/NamedTensorUtils.h> namespace at { namespace native { namespace { template <typename scalar_t, typename index_t, int Dim> __global__ void gatherKthValue( cuda::detail::TensorInfo<scalar_t, index_t> input, index_t inputSliceSize, index_t k, index_t numInputSlices, index_t inputWithinSliceStride, cuda::detail::TensorInfo<scalar_t, index_t> kthValue, cuda::detail::TensorInfo<int64_t, index_t> indices) { // Indices are limited to integer fp precision, so counts can fit in // int32, regardless of index_t __shared__ int smem[C10_WARP_SIZE]; // one per each warp, up to warp limit index_t slice = getLinearBlockId<index_t>(); if (slice >= numInputSlices) { return; } // Find the start offset for our slice index_t sliceStartIndex = cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, input); index_t kthValueSliceStartIndex = cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, kthValue); index_t indicesSliceStartIndex = cuda::detail::IndexToOffset<int64_t, index_t, Dim>::get(slice, indices); scalar_t* inputSliceStart = &input.data[sliceStartIndex]; scalar_t* kthValueSliceStart = &kthValue.data[kthValueSliceStartIndex]; int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex]; // Find the k-th highest element in our input scalar_t kValue = static_cast<scalar_t>(0); radixSelect< scalar_t, typename TopKTypeConfig<scalar_t>::RadixType, index_t, false>( inputSliceStart, k, inputSliceSize, inputWithinSliceStride, smem, &kValue); // Find the index of the k-th highest element index_t kValueIndex = 0; bool foundKValue = false; for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) { bool inRange = (i < inputSliceSize); scalar_t v = inRange ? 
doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<scalar_t>(0); bool isKValue = inRange && THCNumerics<scalar_t>::eq_with_nan(v, kValue); if (isKValue) { kValueIndex = i; foundKValue = true; break; } } if (foundKValue) { kthValueSliceStart[0] = kValue; indicesSliceStart[0] = kValueIndex; } } struct KthValueLauncher { int64_t k; KthValueLauncher(int64_t k) : k(k) {} template <typename scalar_t, typename index_t, int all_dims> inline void launch( cuda::detail::TensorInfo<scalar_t, index_t> values_info, int collapse_values_dim, cuda::detail::TensorInfo<int64_t, index_t> indices_info, int collapse_indices_dim, cuda::detail::TensorInfo<scalar_t, index_t> self_info, int collapse_self_dim, int64_t num_slices, int64_t slice_size) { dim3 grid; if (!getGridFromTiles(num_slices, grid)) { AT_ERROR("slices are too many"); } dim3 block( std::min(THCRoundUp(slice_size, (int64_t)C10_WARP_SIZE), (int64_t)1024)); auto stream = at::cuda::getCurrentCUDAStream(); gatherKthValue<scalar_t, index_t, all_dims><<<grid, block, 0, stream>>>( self_info, slice_size, k, num_slices, /* The actual dimension that the k-selection is running in */ /* may have changed from collapseDims() */ self_info.strides[collapse_self_dim], values_info, indices_info); } }; template <typename scalar_t> void kthvalue_cuda_template( Tensor& values, Tensor& indices, const Tensor& self, int64_t k, int64_t dim_, bool keepdim) { int64_t dim = maybe_wrap_dim(dim_, self.dim()); int64_t slicesize = self.size(dim); // FIXME: This seems bogus, I only do this because it was the old behaviour. // The reductions are fine, as long as the axis being reduced along // isn't of 0 elements (and the output has elements). TORCH_CHECK( self.numel() > 0, "cannot perform reduction function kthvalue", " on tensor with no elements because the operation does not have an identity"); TORCH_CHECK(k >= 1 && k <= slicesize, "selected number k out of range"); _reduction_with_indices_allocate_or_resize_output( values, indices, self, dim, keepdim); if (self.dim() == 0 && self.numel() == 1) { values.copy_(self); indices.zero_(); return; } TORCH_CHECK( self.dim() <= MAX_TENSORINFO_DIMS, "cannot operate on more than ", MAX_TENSORINFO_DIMS, " dimensions"); // Based on required index size, run the algorithm with the // appropriate index type if (cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(values) && cuda::detail::canUse32BitIndexMath(indices)) { run_launcher<scalar_t, uint32_t>( values, indices, self, dim, KthValueLauncher(k)); } else { run_launcher<scalar_t, uint64_t>( values, indices, self, dim, KthValueLauncher(k)); } if (!keepdim) { values.squeeze_(dim); indices.squeeze_(dim); } AT_CUDA_CHECK(cudaGetLastError()); } // this does not reduce to median with dim because we don't want to copy twice template <typename scalar_t> Tensor median_cuda_template(const Tensor& self) { TORCH_CHECK(self.numel() > 0, "median cannot be called with empty tensor"); if (self.dim() == 0 && self.numel() == 1) { return self.clone(at::MemoryFormat::Contiguous); } auto self_copy = self.clone(at::MemoryFormat::Contiguous).view(-1); auto values = at::empty({1}, self.options()); auto indices = at::empty({1}, self.options().dtype(kLong)); TORCH_CHECK( self.dim() <= MAX_TENSORINFO_DIMS, "cannot operate on more than ", MAX_TENSORINFO_DIMS, " dimensions"); // Based on required index size, run the algorithm with the // appropriate index type if (cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(values) && 
cuda::detail::canUse32BitIndexMath(indices)) { run_launcher<scalar_t, uint32_t>( values, indices, self_copy, 0, KthValueLauncher((self_copy.size(0) + 1) / 2)); // KthValue is 1-based } else { run_launcher<scalar_t, uint64_t>( values, indices, self_copy, 0, KthValueLauncher((self_copy.size(0) + 1) / 2)); // KthValue is 1-based } return values.view({}); } } // namespace static std::tuple<Tensor&, Tensor&> kthvalue_out_impl_cuda( Tensor& values, Tensor& indices, const Tensor& self, int64_t k, int64_t dim, bool keepdim) { AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "kthvalue_cuda", [&] { kthvalue_cuda_template<scalar_t>(values, indices, self, k, dim, keepdim); }); return std::forward_as_tuple(values, indices); } std::tuple<Tensor&, Tensor&> kthvalue_out_cuda( Tensor& values, Tensor& indices, const Tensor& self, int64_t k, int64_t dim, bool keepdim) { auto result = [&]() { NoNamesGuard guard; return kthvalue_out_impl_cuda(values, indices, self, k, dim, keepdim); }(); namedinference::propagate_names_for_reduction(values, self, dim, keepdim); namedinference::propagate_names_for_reduction(indices, self, dim, keepdim); return result; } Tensor median_cuda(const Tensor& self) { NoNamesGuard guard; return AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "median", [&] { return median_cuda_template<scalar_t>(self); }); } } // namespace native } // namespace at
906db5df9ef2e7e372176a328a573935e562a565.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/cudev.hpp" using namespace cv::cudev; namespace clahe { __global__ void calcLutKernel_8U(const PtrStepb src, PtrStepb lut, const int2 tileSize, const int tilesX, const int clipLimit, const float lutScale) { __shared__ int smem[256]; const int tx = blockIdx.x; const int ty = blockIdx.y; const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; smem[tid] = 0; __syncthreads(); for (int i = threadIdx.y; i < tileSize.y; i += blockDim.y) { const uchar* srcPtr = src.ptr(ty * tileSize.y + i) + tx * tileSize.x; for (int j = threadIdx.x; j < tileSize.x; j += blockDim.x) { const int data = srcPtr[j]; ::atomicAdd(&smem[data], 1); } } __syncthreads(); int tHistVal = smem[tid]; __syncthreads(); if (clipLimit > 0) { // clip histogram bar int clipped = 0; if (tHistVal > clipLimit) { clipped = tHistVal - clipLimit; tHistVal = clipLimit; } // find number of overall clipped samples blockReduce<256>(smem, clipped, tid, plus<int>()); // broadcast evaluated value __shared__ int totalClipped; __shared__ int redistBatch; __shared__ int residual; __shared__ int rStep; if (tid == 0) { totalClipped = clipped; redistBatch = totalClipped / 256; residual = totalClipped - redistBatch * 256; rStep = 1; if (residual != 0) rStep = 256 / residual; } __syncthreads(); // redistribute clipped samples evenly tHistVal += redistBatch; if (residual && tid % rStep == 0 && tid / rStep < residual) ++tHistVal; } const int lutVal = blockScanInclusive<256>(tHistVal, smem, tid); lut(ty * tilesX + tx, tid) = saturate_cast<uchar>(__float2int_rn(lutScale * lutVal)); } __global__ void calcLutKernel_16U(const PtrStepus src, PtrStepus lut, const int2 tileSize, const int tilesX, const int clipLimit, const float lutScale, PtrStepSzi hist) { #define histSize 65536 #define blockSize 256 __shared__ int smem[blockSize]; const int tx = blockIdx.x; const int ty = blockIdx.y; const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; const int histRow = ty * tilesX + tx; // build histogram for (int i = tid; i < histSize; i += blockSize) hist(histRow, i) = 0; __syncthreads(); for (int i = threadIdx.y; i < tileSize.y; i += blockDim.y) { const ushort* srcPtr = src.ptr(ty * tileSize.y + i) + tx * tileSize.x; for (int j = threadIdx.x; j < tileSize.x; j += blockDim.x) { const int data = srcPtr[j]; ::atomicAdd(&hist(histRow, data), 1); } } __syncthreads(); if (clipLimit > 0) { // clip histogram bar && // find number of overall clipped samples __shared__ int partialSum[blockSize]; for (int i = tid; i < histSize; i += blockSize) { int histVal = hist(histRow, i); int clipped = 0; if (histVal > clipLimit) { clipped = histVal - clipLimit; hist(histRow, i) = clipLimit; } // Following code block is in effect equivalent to: // // blockReduce<blockSize>(smem, clipped, tid, plus<int>()); // { for (int j = 16; j >= 1; j /= 2) { #if __CUDACC_VER_MAJOR__ >= 9 int val = __shfl_down_sync(0xFFFFFFFFU, clipped, j); #else int val = __shfl_down(clipped, j); #endif clipped += val; } if (tid % 32 == 0) smem[tid / 32] = clipped; __syncthreads(); if (tid < 8) { clipped = smem[tid]; for (int j = 4; j >= 1; j /= 2) { #if __CUDACC_VER_MAJOR__ >= 9 int val = __shfl_down_sync(0x000000FFU, clipped, j); #else int val = __shfl_down(clipped, j); #endif clipped += val; } } } // end of code block if (tid == 0) partialSum[i / blockSize] = clipped; __syncthreads(); } int partialSum_ = partialSum[tid]; // Following code block is in effect equivalent to: // // blockReduce<blockSize>(smem, partialSum_, tid, plus<int>()); // { for 
(int j = 16; j >= 1; j /= 2) { #if __CUDACC_VER_MAJOR__ >= 9 int val = __shfl_down_sync(0xFFFFFFFFU, partialSum_, j); #else int val = __shfl_down(partialSum_, j); #endif partialSum_ += val; } if (tid % 32 == 0) smem[tid / 32] = partialSum_; __syncthreads(); if (tid < 8) { partialSum_ = smem[tid]; for (int j = 4; j >= 1; j /= 2) { #if __CUDACC_VER_MAJOR__ >= 9 int val = __shfl_down_sync(0x000000FFU, partialSum_, j); #else int val = __shfl_down(partialSum_, j); #endif partialSum_ += val; } } } // end of code block // broadcast evaluated value && // redistribute clipped samples evenly __shared__ int totalClipped; __shared__ int redistBatch; __shared__ int residual; __shared__ int rStep; if (tid == 0) { totalClipped = partialSum_; redistBatch = totalClipped / histSize; residual = totalClipped - redistBatch * histSize; rStep = 1; if (residual != 0) rStep = histSize / residual; } __syncthreads(); for (int i = tid; i < histSize; i += blockSize) { int histVal = hist(histRow, i); int equalized = histVal + redistBatch; if (residual && i % rStep == 0 && i / rStep < residual) ++equalized; hist(histRow, i) = equalized; } } __shared__ int partialScan[blockSize]; for (int i = tid; i < histSize; i += blockSize) { int equalized = hist(histRow, i); equalized = blockScanInclusive<blockSize>(equalized, smem, tid); if (tid == blockSize - 1) partialScan[i / blockSize] = equalized; hist(histRow, i) = equalized; } __syncthreads(); int partialScan_ = partialScan[tid]; partialScan[tid] = blockScanExclusive<blockSize>(partialScan_, smem, tid); __syncthreads(); for (int i = tid; i < histSize; i += blockSize) { const int lutVal = hist(histRow, i) + partialScan[i / blockSize]; lut(histRow, i) = saturate_cast<ushort>(__float2int_rn(lutScale * lutVal)); } #undef histSize #undef blockSize } void calcLut_8U(PtrStepSzb src, PtrStepb lut, int tilesX, int tilesY, int2 tileSize, int clipLimit, float lutScale, hipStream_t stream) { const dim3 block(32, 8); const dim3 grid(tilesX, tilesY); hipLaunchKernelGGL(( calcLutKernel_8U), dim3(grid), dim3(block), 0, stream, src, lut, tileSize, tilesX, clipLimit, lutScale); CV_CUDEV_SAFE_CALL( hipGetLastError() ); if (stream == 0) CV_CUDEV_SAFE_CALL( hipDeviceSynchronize() ); } void calcLut_16U(PtrStepSzus src, PtrStepus lut, int tilesX, int tilesY, int2 tileSize, int clipLimit, float lutScale, PtrStepSzi hist, hipStream_t stream) { const dim3 block(32, 8); const dim3 grid(tilesX, tilesY); hipLaunchKernelGGL(( calcLutKernel_16U), dim3(grid), dim3(block), 0, stream, src, lut, tileSize, tilesX, clipLimit, lutScale, hist); CV_CUDEV_SAFE_CALL( hipGetLastError() ); if (stream == 0) CV_CUDEV_SAFE_CALL( hipDeviceSynchronize() ); } template <typename T> __global__ void transformKernel(const PtrStepSz<T> src, PtrStep<T> dst, const PtrStep<T> lut, const int2 tileSize, const int tilesX, const int tilesY) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= src.cols || y >= src.rows) return; const float tyf = (static_cast<float>(y) / tileSize.y) - 0.5f; int ty1 = __float2int_rd(tyf); int ty2 = ty1 + 1; const float ya = tyf - ty1; ty1 = ::max(ty1, 0); ty2 = ::min(ty2, tilesY - 1); const float txf = (static_cast<float>(x) / tileSize.x) - 0.5f; int tx1 = __float2int_rd(txf); int tx2 = tx1 + 1; const float xa = txf - tx1; tx1 = ::max(tx1, 0); tx2 = ::min(tx2, tilesX - 1); const int srcVal = src(y, x); float res = 0; res += lut(ty1 * tilesX + tx1, srcVal) * ((1.0f - xa) * (1.0f - ya)); res += lut(ty1 * tilesX + tx2, srcVal) * ((xa) * (1.0f - 
ya)); res += lut(ty2 * tilesX + tx1, srcVal) * ((1.0f - xa) * (ya)); res += lut(ty2 * tilesX + tx2, srcVal) * ((xa) * (ya)); dst(y, x) = saturate_cast<T>(res); } template <typename T> void transform(PtrStepSz<T> src, PtrStepSz<T> dst, PtrStep<T> lut, int tilesX, int tilesY, int2 tileSize, hipStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y)); CV_CUDEV_SAFE_CALL( hipFuncSetCacheConfig(transformKernel<T>, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( transformKernel<T>), dim3(grid), dim3(block), 0, stream, src, dst, lut, tileSize, tilesX, tilesY); CV_CUDEV_SAFE_CALL( hipGetLastError() ); if (stream == 0) CV_CUDEV_SAFE_CALL( hipDeviceSynchronize() ); } template void transform<uchar>(PtrStepSz<uchar> src, PtrStepSz<uchar> dst, PtrStep<uchar> lut, int tilesX, int tilesY, int2 tileSize, hipStream_t stream); template void transform<ushort>(PtrStepSz<ushort> src, PtrStepSz<ushort> dst, PtrStep<ushort> lut, int tilesX, int tilesY, int2 tileSize, hipStream_t stream); } #endif // CUDA_DISABLER
906db5df9ef2e7e372176a328a573935e562a565.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/cudev.hpp" using namespace cv::cudev; namespace clahe { __global__ void calcLutKernel_8U(const PtrStepb src, PtrStepb lut, const int2 tileSize, const int tilesX, const int clipLimit, const float lutScale) { __shared__ int smem[256]; const int tx = blockIdx.x; const int ty = blockIdx.y; const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; smem[tid] = 0; __syncthreads(); for (int i = threadIdx.y; i < tileSize.y; i += blockDim.y) { const uchar* srcPtr = src.ptr(ty * tileSize.y + i) + tx * tileSize.x; for (int j = threadIdx.x; j < tileSize.x; j += blockDim.x) { const int data = srcPtr[j]; ::atomicAdd(&smem[data], 1); } } __syncthreads(); int tHistVal = smem[tid]; __syncthreads(); if (clipLimit > 0) { // clip histogram bar int clipped = 0; if (tHistVal > clipLimit) { clipped = tHistVal - clipLimit; tHistVal = clipLimit; } // find number of overall clipped samples blockReduce<256>(smem, clipped, tid, plus<int>()); // broadcast evaluated value __shared__ int totalClipped; __shared__ int redistBatch; __shared__ int residual; __shared__ int rStep; if (tid == 0) { totalClipped = clipped; redistBatch = totalClipped / 256; residual = totalClipped - redistBatch * 256; rStep = 1; if (residual != 0) rStep = 256 / residual; } __syncthreads(); // redistribute clipped samples evenly tHistVal += redistBatch; if (residual && tid % rStep == 0 && tid / rStep < residual) ++tHistVal; } const int lutVal = blockScanInclusive<256>(tHistVal, smem, tid); lut(ty * tilesX + tx, tid) = saturate_cast<uchar>(__float2int_rn(lutScale * lutVal)); } __global__ void calcLutKernel_16U(const PtrStepus src, PtrStepus lut, const int2 tileSize, const int tilesX, const int clipLimit, const float lutScale, PtrStepSzi hist) { #define histSize 65536 #define blockSize 256 __shared__ int smem[blockSize]; const int tx = blockIdx.x; const int ty = blockIdx.y; const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; const int histRow = ty * tilesX + tx; // build histogram for (int i = tid; i < histSize; i += blockSize) hist(histRow, i) = 0; __syncthreads(); for (int i = threadIdx.y; i < tileSize.y; i += blockDim.y) { const ushort* srcPtr = src.ptr(ty * tileSize.y + i) + tx * tileSize.x; for (int j = threadIdx.x; j < tileSize.x; j += blockDim.x) { const int data = srcPtr[j]; ::atomicAdd(&hist(histRow, data), 1); } } __syncthreads(); if (clipLimit > 0) { // clip histogram bar && // find number of overall clipped samples __shared__ int partialSum[blockSize]; for (int i = tid; i < histSize; i += blockSize) { int histVal = hist(histRow, i); int clipped = 0; if (histVal > clipLimit) { clipped = histVal - clipLimit; hist(histRow, i) = clipLimit; } // Following code block is in effect equivalent to: // // blockReduce<blockSize>(smem, clipped, tid, plus<int>()); // { for (int j = 16; j >= 1; j /= 2) { #if __CUDACC_VER_MAJOR__ >= 9 int val = __shfl_down_sync(0xFFFFFFFFU, clipped, j); #else int val = __shfl_down(clipped, j); #endif clipped += val; } if (tid % 32 == 0) smem[tid / 32] = clipped; __syncthreads(); if (tid < 8) { clipped = smem[tid]; for (int j = 4; j >= 1; j /= 2) { #if __CUDACC_VER_MAJOR__ >= 9 int val = __shfl_down_sync(0x000000FFU, clipped, j); #else int val = __shfl_down(clipped, j); #endif clipped += val; } } } // end of code block if (tid == 0) partialSum[i / blockSize] = clipped; __syncthreads(); } int partialSum_ = partialSum[tid]; // Following code block is in effect equivalent to: // // blockReduce<blockSize>(smem, partialSum_, tid, plus<int>()); // { for 
(int j = 16; j >= 1; j /= 2) { #if __CUDACC_VER_MAJOR__ >= 9 int val = __shfl_down_sync(0xFFFFFFFFU, partialSum_, j); #else int val = __shfl_down(partialSum_, j); #endif partialSum_ += val; } if (tid % 32 == 0) smem[tid / 32] = partialSum_; __syncthreads(); if (tid < 8) { partialSum_ = smem[tid]; for (int j = 4; j >= 1; j /= 2) { #if __CUDACC_VER_MAJOR__ >= 9 int val = __shfl_down_sync(0x000000FFU, partialSum_, j); #else int val = __shfl_down(partialSum_, j); #endif partialSum_ += val; } } } // end of code block // broadcast evaluated value && // redistribute clipped samples evenly __shared__ int totalClipped; __shared__ int redistBatch; __shared__ int residual; __shared__ int rStep; if (tid == 0) { totalClipped = partialSum_; redistBatch = totalClipped / histSize; residual = totalClipped - redistBatch * histSize; rStep = 1; if (residual != 0) rStep = histSize / residual; } __syncthreads(); for (int i = tid; i < histSize; i += blockSize) { int histVal = hist(histRow, i); int equalized = histVal + redistBatch; if (residual && i % rStep == 0 && i / rStep < residual) ++equalized; hist(histRow, i) = equalized; } } __shared__ int partialScan[blockSize]; for (int i = tid; i < histSize; i += blockSize) { int equalized = hist(histRow, i); equalized = blockScanInclusive<blockSize>(equalized, smem, tid); if (tid == blockSize - 1) partialScan[i / blockSize] = equalized; hist(histRow, i) = equalized; } __syncthreads(); int partialScan_ = partialScan[tid]; partialScan[tid] = blockScanExclusive<blockSize>(partialScan_, smem, tid); __syncthreads(); for (int i = tid; i < histSize; i += blockSize) { const int lutVal = hist(histRow, i) + partialScan[i / blockSize]; lut(histRow, i) = saturate_cast<ushort>(__float2int_rn(lutScale * lutVal)); } #undef histSize #undef blockSize } void calcLut_8U(PtrStepSzb src, PtrStepb lut, int tilesX, int tilesY, int2 tileSize, int clipLimit, float lutScale, cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(tilesX, tilesY); calcLutKernel_8U<<<grid, block, 0, stream>>>(src, lut, tileSize, tilesX, clipLimit, lutScale); CV_CUDEV_SAFE_CALL( cudaGetLastError() ); if (stream == 0) CV_CUDEV_SAFE_CALL( cudaDeviceSynchronize() ); } void calcLut_16U(PtrStepSzus src, PtrStepus lut, int tilesX, int tilesY, int2 tileSize, int clipLimit, float lutScale, PtrStepSzi hist, cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(tilesX, tilesY); calcLutKernel_16U<<<grid, block, 0, stream>>>(src, lut, tileSize, tilesX, clipLimit, lutScale, hist); CV_CUDEV_SAFE_CALL( cudaGetLastError() ); if (stream == 0) CV_CUDEV_SAFE_CALL( cudaDeviceSynchronize() ); } template <typename T> __global__ void transformKernel(const PtrStepSz<T> src, PtrStep<T> dst, const PtrStep<T> lut, const int2 tileSize, const int tilesX, const int tilesY) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= src.cols || y >= src.rows) return; const float tyf = (static_cast<float>(y) / tileSize.y) - 0.5f; int ty1 = __float2int_rd(tyf); int ty2 = ty1 + 1; const float ya = tyf - ty1; ty1 = ::max(ty1, 0); ty2 = ::min(ty2, tilesY - 1); const float txf = (static_cast<float>(x) / tileSize.x) - 0.5f; int tx1 = __float2int_rd(txf); int tx2 = tx1 + 1; const float xa = txf - tx1; tx1 = ::max(tx1, 0); tx2 = ::min(tx2, tilesX - 1); const int srcVal = src(y, x); float res = 0; res += lut(ty1 * tilesX + tx1, srcVal) * ((1.0f - xa) * (1.0f - ya)); res += lut(ty1 * tilesX + tx2, srcVal) * ((xa) * (1.0f - ya)); res += lut(ty2 * tilesX + tx1, srcVal) * ((1.0f - 
xa) * (ya)); res += lut(ty2 * tilesX + tx2, srcVal) * ((xa) * (ya)); dst(y, x) = saturate_cast<T>(res); } template <typename T> void transform(PtrStepSz<T> src, PtrStepSz<T> dst, PtrStep<T> lut, int tilesX, int tilesY, int2 tileSize, cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y)); CV_CUDEV_SAFE_CALL( cudaFuncSetCacheConfig(transformKernel<T>, cudaFuncCachePreferL1) ); transformKernel<T><<<grid, block, 0, stream>>>(src, dst, lut, tileSize, tilesX, tilesY); CV_CUDEV_SAFE_CALL( cudaGetLastError() ); if (stream == 0) CV_CUDEV_SAFE_CALL( cudaDeviceSynchronize() ); } template void transform<uchar>(PtrStepSz<uchar> src, PtrStepSz<uchar> dst, PtrStep<uchar> lut, int tilesX, int tilesY, int2 tileSize, cudaStream_t stream); template void transform<ushort>(PtrStepSz<ushort> src, PtrStepSz<ushort> dst, PtrStep<ushort> lut, int tilesX, int tilesY, int2 tileSize, cudaStream_t stream); } #endif // CUDA_DISABLER
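// A minimal host-side sketch of how the parameters consumed by calcLut_8U /
// calcLut_16U above (tileSize, the integer clipLimit and lutScale) are
// typically derived, assuming OpenCV-style CLAHE inputs where the clip factor
// is expressed relative to the average histogram bin height. The helper name
// and the tile-divisibility assumption are illustrative, not the library's own code path.
#include <cuda_runtime.h>
#include <algorithm>

struct ClaheLutParams { int2 tileSize; int clipLimit; float lutScale; };

static ClaheLutParams makeClaheLutParams(int rows, int cols, int tilesX, int tilesY,
                                         double clipFactor, int histSize /* 256 or 65536 */)
{
    ClaheLutParams p;
    p.tileSize = make_int2(cols / tilesX, rows / tilesY); // assumes the image is padded to a multiple of the tile grid
    const int tileArea = p.tileSize.x * p.tileSize.y;
    // blockScanInclusive produces a cumulative histogram in [0, tileArea];
    // lutScale maps that scan onto the full output range of the LUT.
    p.lutScale = static_cast<float>(histSize - 1) / tileArea;
    // clipLimit in absolute counts; a value <= 0 disables clipping in the kernels above.
    p.clipLimit = (clipFactor > 0.0)
                      ? std::max(1, static_cast<int>(clipFactor * tileArea / histSize))
                      : 0;
    return p;
}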
26c9bcd9fbb7a8f581749c65f8a1fe3d9db06ac8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_initialise_chunk_kernel_z [3][2]; static int dims_initialise_chunk_kernel_z_h [3][2] = {0}; //user function __device__ void initialise_chunk_kernel_z_gpu(ACC<double> &vertexz, const ACC<int> &zz, ACC<double> &vertexdz) { int z_min=field.z_min-2; double min_z, d_z; d_z = (grid.zmax - grid.zmin)/(double)grid.z_cells; min_z=grid.zmin+d_z*field.back; vertexz(0,0,0) = min_z + d_z * (zz(0,0,0) - z_min); vertexdz(0,0,0) = (double)d_z; } __global__ void ops_initialise_chunk_kernel_z( double* __restrict arg0, int* __restrict arg1, double* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 0*1 + idx_y * 0*1 * dims_initialise_chunk_kernel_z[0][0] + idx_z * 1*1 * dims_initialise_chunk_kernel_z[0][0] * dims_initialise_chunk_kernel_z[0][1]; arg1 += idx_x * 0*1 + idx_y * 0*1 * dims_initialise_chunk_kernel_z[1][0] + idx_z * 1*1 * dims_initialise_chunk_kernel_z[1][0] * dims_initialise_chunk_kernel_z[1][1]; arg2 += idx_x * 0*1 + idx_y * 0*1 * dims_initialise_chunk_kernel_z[2][0] + idx_z * 1*1 * dims_initialise_chunk_kernel_z[2][0] * dims_initialise_chunk_kernel_z[2][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_initialise_chunk_kernel_z[0][0], dims_initialise_chunk_kernel_z[0][1], arg0); const ACC<int> argp1(dims_initialise_chunk_kernel_z[1][0], dims_initialise_chunk_kernel_z[1][1], arg1); ACC<double> argp2(dims_initialise_chunk_kernel_z[2][0], dims_initialise_chunk_kernel_z[2][1], arg2); initialise_chunk_kernel_z_gpu(argp0, argp1, argp2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_z(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_initialise_chunk_kernel_z_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,5)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(5,"initialise_chunk_kernel_z"); OPS_kernels[5].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; if (xdim0 != dims_initialise_chunk_kernel_z_h[0][0] || ydim0 != dims_initialise_chunk_kernel_z_h[0][1] || xdim1 != dims_initialise_chunk_kernel_z_h[1][0] || ydim1 != dims_initialise_chunk_kernel_z_h[1][1] || xdim2 != dims_initialise_chunk_kernel_z_h[2][0] || ydim2 != dims_initialise_chunk_kernel_z_h[2][1]) { dims_initialise_chunk_kernel_z_h[0][0] = xdim0; dims_initialise_chunk_kernel_z_h[0][1] = 
ydim0; dims_initialise_chunk_kernel_z_h[1][0] = xdim1; dims_initialise_chunk_kernel_z_h[1][1] = ydim1; dims_initialise_chunk_kernel_z_h[2][0] = xdim2; dims_initialise_chunk_kernel_z_h[2][1] = ydim2; cutilSafeCall(hipMemcpyToSymbol( dims_initialise_chunk_kernel_z, dims_initialise_chunk_kernel_z_h, sizeof(dims_initialise_chunk_kernel_z))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[5].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_initialise_chunk_kernel_z), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (int *)p_a[1], (double *)p_a[2],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[5].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[2],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[5].mpi_time += t2-t1; OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg2); } } #ifdef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_z(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 5; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 5; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); 
desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->function = ops_par_loop_initialise_chunk_kernel_z_execute; if (OPS_diags > 1) { ops_timing_realloc(5,"initialise_chunk_kernel_z"); } ops_enqueue_kernel(desc); } #endif
26c9bcd9fbb7a8f581749c65f8a1fe3d9db06ac8.cu
// // auto-generated by ops.py // __constant__ int dims_initialise_chunk_kernel_z [3][2]; static int dims_initialise_chunk_kernel_z_h [3][2] = {0}; //user function __device__ void initialise_chunk_kernel_z_gpu(ACC<double> &vertexz, const ACC<int> &zz, ACC<double> &vertexdz) { int z_min=field.z_min-2; double min_z, d_z; d_z = (grid.zmax - grid.zmin)/(double)grid.z_cells; min_z=grid.zmin+d_z*field.back; vertexz(0,0,0) = min_z + d_z * (zz(0,0,0) - z_min); vertexdz(0,0,0) = (double)d_z; } __global__ void ops_initialise_chunk_kernel_z( double* __restrict arg0, int* __restrict arg1, double* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 0*1 + idx_y * 0*1 * dims_initialise_chunk_kernel_z[0][0] + idx_z * 1*1 * dims_initialise_chunk_kernel_z[0][0] * dims_initialise_chunk_kernel_z[0][1]; arg1 += idx_x * 0*1 + idx_y * 0*1 * dims_initialise_chunk_kernel_z[1][0] + idx_z * 1*1 * dims_initialise_chunk_kernel_z[1][0] * dims_initialise_chunk_kernel_z[1][1]; arg2 += idx_x * 0*1 + idx_y * 0*1 * dims_initialise_chunk_kernel_z[2][0] + idx_z * 1*1 * dims_initialise_chunk_kernel_z[2][0] * dims_initialise_chunk_kernel_z[2][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_initialise_chunk_kernel_z[0][0], dims_initialise_chunk_kernel_z[0][1], arg0); const ACC<int> argp1(dims_initialise_chunk_kernel_z[1][0], dims_initialise_chunk_kernel_z[1][1], arg1); ACC<double> argp2(dims_initialise_chunk_kernel_z[2][0], dims_initialise_chunk_kernel_z[2][1], arg2); initialise_chunk_kernel_z_gpu(argp0, argp1, argp2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_z(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_initialise_chunk_kernel_z_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,5)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(5,"initialise_chunk_kernel_z"); OPS_kernels[5].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; if (xdim0 != dims_initialise_chunk_kernel_z_h[0][0] || ydim0 != dims_initialise_chunk_kernel_z_h[0][1] || xdim1 != dims_initialise_chunk_kernel_z_h[1][0] || ydim1 != dims_initialise_chunk_kernel_z_h[1][1] || xdim2 != dims_initialise_chunk_kernel_z_h[2][0] || ydim2 != dims_initialise_chunk_kernel_z_h[2][1]) { dims_initialise_chunk_kernel_z_h[0][0] = xdim0; dims_initialise_chunk_kernel_z_h[0][1] = ydim0; dims_initialise_chunk_kernel_z_h[1][0] = xdim1; 
dims_initialise_chunk_kernel_z_h[1][1] = ydim1; dims_initialise_chunk_kernel_z_h[2][0] = xdim2; dims_initialise_chunk_kernel_z_h[2][1] = ydim2; cutilSafeCall(cudaMemcpyToSymbol( dims_initialise_chunk_kernel_z, dims_initialise_chunk_kernel_z_h, sizeof(dims_initialise_chunk_kernel_z))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[5].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_initialise_chunk_kernel_z<<<grid, tblock >>> ( (double *)p_a[0], (int *)p_a[1], (double *)p_a[2],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[5].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[2],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[5].mpi_time += t2-t1; OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg2); } } #ifdef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_z(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 5; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 5; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; 
desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->function = ops_par_loop_initialise_chunk_kernel_z_execute; if (OPS_diags > 1) { ops_timing_realloc(5,"initialise_chunk_kernel_z"); } ops_enqueue_kernel(desc); } #endif
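// The generated wrapper above advances each argument pointer by a flattened
// 3-D offset of the form x*sx + y*sy*xdim + z*sz*xdim*ydim, where the 0/1
// multipliers come from the stencil strides. A small standalone helper with
// the same arithmetic (the name is illustrative, not part of the OPS API) can
// be handy when checking the auto-generated index expressions by hand:
__host__ __device__ inline long long
ops_flat_offset3d(int x, int y, int z,
                  int xdim, int ydim,      // padded dataset extents in x and y
                  int sx, int sy, int sz)  // per-dimension stencil strides (0 or 1)
{
    return (long long)x * sx
         + (long long)y * sy * xdim
         + (long long)z * sz * (long long)xdim * ydim;
}
// For initialise_chunk_kernel_z the x and y strides are 0, so every thread in
// an (x, y) plane addresses the same z column, matching the kernel above.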
86885d6c237518174597028fe8442f4fb3aea41e.hip
// !!! This is a file automatically generated by hipify!!! #include "cudakernel/memory/gather_nd.h" #include "cudakernel/common/divmod_fast.h" #include "ppl/nn/common/tensor_shape.h" #include "ppl/common/retcode.h" #include "ppl/common/types.h" #include <hip/hip_runtime.h> #include <assert.h> #include <vector> template <typename T> __global__ void ppl_cukernel_gather_nd( int64_t num_elems, DivModFast piece_size_fast, int64_t* piece_offsets, const T* input, T* output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num_elems) return; int piece_idx, offset; piece_size_fast.divmod(index, piece_idx, offset); int64_t base_offset = piece_offsets[piece_idx]; output[index] = input[base_offset + offset]; } template <typename IndexT> __global__ void ppl_cukernel_gather_nd_offset( int64_t num_pieces, DivModFast num_pieces_per_batch_fast, int batch_dim, int64_t* input_dims_gpu, int input_batch_stride, int64_t* input_strides_gpu, int indices_last_dim_size, const IndexT* indices_data, int64_t* piece_offsets) { int64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num_pieces) return; // batch offset int batch_idx = num_pieces_per_batch_fast.div(index); int64_t batch_offset = batch_idx * input_batch_stride; // inner offset const IndexT *indices_ptr = indices_data + index * indices_last_dim_size; int64_t rel_offset = 0; for (int it = 0; it < indices_last_dim_size; ++it) { IndexT cor_val = indices_ptr[it]; if (cor_val < 0) cor_val += input_dims_gpu[batch_dim + it]; assert(cor_val >= 0 && cor_val < input_dims_gpu[batch_dim + it]); rel_offset += cor_val * input_strides_gpu[it]; } piece_offsets[index] = batch_offset + rel_offset; } int64_t pplGatherNDGetTempBufferSize( const ppl::nn::TensorShape* input_shape, const void* input, const ppl::nn::TensorShape* indices_shape, const void* indices) { int num_input_dim = input_shape->GetDimCount(); int num_indices_dim = indices_shape->GetDimCount(); int num_pieces = indices_shape->GetElementsToDimensionIncludingPadding(num_indices_dim - 1); // pieces offsets and input strides and input_dims int64_t total_size = (num_pieces + 2 * num_input_dim) * sizeof(int64_t); return total_size; } ppl::common::RetCode PPLCUDAGatherNDForwardImp( hipStream_t stream, const ppl::nn::TensorShape* input_shape, const void* input, const ppl::nn::TensorShape* indices_shape, const void* indices, const ppl::nn::TensorShape* output_shape, void* output, void* temp_buffer, int batch_dim) { int num_batches = input_shape->GetElementsToDimensionIncludingPadding(batch_dim); int input_batch_stride = input_shape->GetElementsFromDimensionIncludingPadding(batch_dim); int num_indices_dim = indices_shape->GetDimCount(); int num_input_dim = input_shape->GetDimCount(); int indices_last_dim_size = indices_shape->GetDim(num_indices_dim - 1); int num_pieces = indices_shape->GetElementsToDimensionIncludingPadding(num_indices_dim - 1); DivModFast num_pieces_per_batch_fast(num_pieces / num_batches); int piece_size = input_shape->GetElementsFromDimensionIncludingPadding( batch_dim + indices_last_dim_size); int block_size = 256; // step 1: calcalute each piece's offset first int64_t *piece_offsets = static_cast<int64_t *>(temp_buffer); int64_t *input_strides_gpu = piece_offsets + num_pieces; int64_t *input_dims_gpu = input_strides_gpu + num_input_dim; std::vector<int64_t> input_strides(indices_last_dim_size); std::vector<int64_t> input_dims(num_input_dim); // dimension is partitioned as batch--indices_last_dim_size--piece_size int64_t acc_strides = piece_size; for (int it = 0; it < 
indices_last_dim_size; ++it) { input_strides[indices_last_dim_size - 1 - it] = acc_strides; acc_strides *= input_shape->GetDim(batch_dim + indices_last_dim_size - 1 - it); } for (int it = 0; it < num_input_dim; ++it) input_dims[it] = input_shape->GetDim(it); hipMemcpyAsync(input_strides_gpu, input_strides.data(), sizeof(int64_t) * indices_last_dim_size, hipMemcpyHostToDevice, stream); hipMemcpyAsync(input_dims_gpu, input_dims.data(), sizeof(int64_t) * num_input_dim, hipMemcpyHostToDevice, stream); int cal_offset_grid = (num_pieces + block_size - 1) / block_size; switch (ppl::common::GetSizeOfDataType(indices_shape->GetDataType())) { case sizeof(int32_t): { hipLaunchKernelGGL(( ppl_cukernel_gather_nd_offset), dim3(cal_offset_grid), dim3(block_size), 0, stream, num_pieces, num_pieces_per_batch_fast, batch_dim, input_dims_gpu, input_batch_stride, input_strides_gpu, indices_last_dim_size, (const int32_t *)indices, piece_offsets); break; } case sizeof(int64_t): { hipLaunchKernelGGL(( ppl_cukernel_gather_nd_offset), dim3(cal_offset_grid), dim3(block_size), 0, stream, num_pieces, num_pieces_per_batch_fast, batch_dim, input_dims_gpu, input_batch_stride, input_strides_gpu, indices_last_dim_size, (const int64_t *)indices, piece_offsets); break; } default: return ppl::common::RC_UNSUPPORTED; } // step2: begiin gather elements int64_t num_elems = output_shape->GetElementsIncludingPadding(); int gather_grid_size = (num_elems + block_size - 1) / block_size; DivModFast piece_size_fast(piece_size); #define SWITCH_CASE(TYPE) \ case sizeof(TYPE): { \ hipLaunchKernelGGL(( ppl_cukernel_gather_nd), dim3(gather_grid_size), dim3(block_size), \ 0, stream, num_elems, piece_size_fast, piece_offsets, (const TYPE *)input, (TYPE *)output); \ return ppl::common::RC_SUCCESS; \ } switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) { SWITCH_CASE(int8_t); SWITCH_CASE(int16_t); SWITCH_CASE(int32_t); SWITCH_CASE(int64_t); default: return ppl::common::RC_UNSUPPORTED; } #undef SWITCH_CASE }
86885d6c237518174597028fe8442f4fb3aea41e.cu
#include "cudakernel/memory/gather_nd.h" #include "cudakernel/common/divmod_fast.h" #include "ppl/nn/common/tensor_shape.h" #include "ppl/common/retcode.h" #include "ppl/common/types.h" #include <cuda_runtime.h> #include <assert.h> #include <vector> template <typename T> __global__ void ppl_cukernel_gather_nd( int64_t num_elems, DivModFast piece_size_fast, int64_t* piece_offsets, const T* input, T* output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num_elems) return; int piece_idx, offset; piece_size_fast.divmod(index, piece_idx, offset); int64_t base_offset = piece_offsets[piece_idx]; output[index] = input[base_offset + offset]; } template <typename IndexT> __global__ void ppl_cukernel_gather_nd_offset( int64_t num_pieces, DivModFast num_pieces_per_batch_fast, int batch_dim, int64_t* input_dims_gpu, int input_batch_stride, int64_t* input_strides_gpu, int indices_last_dim_size, const IndexT* indices_data, int64_t* piece_offsets) { int64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num_pieces) return; // batch offset int batch_idx = num_pieces_per_batch_fast.div(index); int64_t batch_offset = batch_idx * input_batch_stride; // inner offset const IndexT *indices_ptr = indices_data + index * indices_last_dim_size; int64_t rel_offset = 0; for (int it = 0; it < indices_last_dim_size; ++it) { IndexT cor_val = indices_ptr[it]; if (cor_val < 0) cor_val += input_dims_gpu[batch_dim + it]; assert(cor_val >= 0 && cor_val < input_dims_gpu[batch_dim + it]); rel_offset += cor_val * input_strides_gpu[it]; } piece_offsets[index] = batch_offset + rel_offset; } int64_t pplGatherNDGetTempBufferSize( const ppl::nn::TensorShape* input_shape, const void* input, const ppl::nn::TensorShape* indices_shape, const void* indices) { int num_input_dim = input_shape->GetDimCount(); int num_indices_dim = indices_shape->GetDimCount(); int num_pieces = indices_shape->GetElementsToDimensionIncludingPadding(num_indices_dim - 1); // pieces offsets and input strides and input_dims int64_t total_size = (num_pieces + 2 * num_input_dim) * sizeof(int64_t); return total_size; } ppl::common::RetCode PPLCUDAGatherNDForwardImp( cudaStream_t stream, const ppl::nn::TensorShape* input_shape, const void* input, const ppl::nn::TensorShape* indices_shape, const void* indices, const ppl::nn::TensorShape* output_shape, void* output, void* temp_buffer, int batch_dim) { int num_batches = input_shape->GetElementsToDimensionIncludingPadding(batch_dim); int input_batch_stride = input_shape->GetElementsFromDimensionIncludingPadding(batch_dim); int num_indices_dim = indices_shape->GetDimCount(); int num_input_dim = input_shape->GetDimCount(); int indices_last_dim_size = indices_shape->GetDim(num_indices_dim - 1); int num_pieces = indices_shape->GetElementsToDimensionIncludingPadding(num_indices_dim - 1); DivModFast num_pieces_per_batch_fast(num_pieces / num_batches); int piece_size = input_shape->GetElementsFromDimensionIncludingPadding( batch_dim + indices_last_dim_size); int block_size = 256; // step 1: calcalute each piece's offset first int64_t *piece_offsets = static_cast<int64_t *>(temp_buffer); int64_t *input_strides_gpu = piece_offsets + num_pieces; int64_t *input_dims_gpu = input_strides_gpu + num_input_dim; std::vector<int64_t> input_strides(indices_last_dim_size); std::vector<int64_t> input_dims(num_input_dim); // dimension is partitioned as batch--indices_last_dim_size--piece_size int64_t acc_strides = piece_size; for (int it = 0; it < indices_last_dim_size; ++it) { 
input_strides[indices_last_dim_size - 1 - it] = acc_strides; acc_strides *= input_shape->GetDim(batch_dim + indices_last_dim_size - 1 - it); } for (int it = 0; it < num_input_dim; ++it) input_dims[it] = input_shape->GetDim(it); cudaMemcpyAsync(input_strides_gpu, input_strides.data(), sizeof(int64_t) * indices_last_dim_size, cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(input_dims_gpu, input_dims.data(), sizeof(int64_t) * num_input_dim, cudaMemcpyHostToDevice, stream); int cal_offset_grid = (num_pieces + block_size - 1) / block_size; switch (ppl::common::GetSizeOfDataType(indices_shape->GetDataType())) { case sizeof(int32_t): { ppl_cukernel_gather_nd_offset<<<cal_offset_grid, block_size, 0, stream>>>(num_pieces, num_pieces_per_batch_fast, batch_dim, input_dims_gpu, input_batch_stride, input_strides_gpu, indices_last_dim_size, (const int32_t *)indices, piece_offsets); break; } case sizeof(int64_t): { ppl_cukernel_gather_nd_offset<<<cal_offset_grid, block_size, 0, stream>>>(num_pieces, num_pieces_per_batch_fast, batch_dim, input_dims_gpu, input_batch_stride, input_strides_gpu, indices_last_dim_size, (const int64_t *)indices, piece_offsets); break; } default: return ppl::common::RC_UNSUPPORTED; } // step2: begiin gather elements int64_t num_elems = output_shape->GetElementsIncludingPadding(); int gather_grid_size = (num_elems + block_size - 1) / block_size; DivModFast piece_size_fast(piece_size); #define SWITCH_CASE(TYPE) \ case sizeof(TYPE): { \ ppl_cukernel_gather_nd<<<gather_grid_size, block_size, \ 0, stream>>>(num_elems, piece_size_fast, piece_offsets, (const TYPE *)input, (TYPE *)output); \ return ppl::common::RC_SUCCESS; \ } switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) { SWITCH_CASE(int8_t); SWITCH_CASE(int16_t); SWITCH_CASE(int32_t); SWITCH_CASE(int64_t); default: return ppl::common::RC_UNSUPPORTED; } #undef SWITCH_CASE }
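// The two kernels above split GatherND into (1) one base offset per "piece"
// (an entry of the indices tensor without its last dimension) and (2) a copy
// of piece_size contiguous elements per piece. A minimal host-side sketch of
// the same bookkeeping on plain shape vectors (a hypothetical helper, not the
// ppl.nn tensor API) for sanity-checking temp-buffer and grid sizes:
#include <cstddef>
#include <cstdint>
#include <vector>

struct GatherNdSizes { int64_t num_pieces; int64_t piece_size; int64_t out_elems; };

static GatherNdSizes gatherNdSizes(const std::vector<int64_t>& in_shape,
                                   const std::vector<int64_t>& idx_shape,
                                   int batch_dim)
{
    const int64_t k = idx_shape.back();                   // indices_last_dim_size
    GatherNdSizes s{1, 1, 0};
    for (std::size_t i = 0; i + 1 < idx_shape.size(); ++i)
        s.num_pieces *= idx_shape[i];                     // all indices dims except the last
    for (int64_t i = batch_dim + k; i < (int64_t)in_shape.size(); ++i)
        s.piece_size *= in_shape[i];                      // trailing, non-indexed input dims
    s.out_elems = s.num_pieces * s.piece_size;            // num_elems in ppl_cukernel_gather_nd
    return s;
}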
ad2c853c5de5e34e88eeb327c0c9744c479b7f10.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/prior_box_op.h" namespace paddle { namespace operators { template <typename T> __device__ inline T clip(T in) { return min(max(in, 0.), 1.); } template <typename T> __global__ void GenPriorBox(T* out, const T* aspect_ratios, const int height, const int width, const int im_height, const int im_width, const int as_num, const T offset, const T step_width, const T step_height, const T* min_sizes, const T* max_sizes, const int min_num, bool is_clip) { int num_priors = max_sizes ? as_num * min_num + min_num : as_num * min_num; int box_num = height * width * num_priors; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < box_num; i += blockDim.x * gridDim.x) { int h = i / (num_priors * width); int w = (i / num_priors) % width; int p = i % num_priors; int m = max_sizes ? p / (as_num + 1) : p / as_num; T cx = (w + offset) * step_width; T cy = (h + offset) * step_height; T bw, bh; T min_size = min_sizes[m]; if (max_sizes) { int s = p % (as_num + 1); if (s < as_num) { T ar = aspect_ratios[s]; bw = min_size * sqrt(ar) / 2.; bh = min_size / sqrt(ar) / 2.; } else { T max_size = max_sizes[m]; bw = sqrt(min_size * max_size) / 2.; bh = bw; } } else { int s = p % as_num; T ar = aspect_ratios[s]; bw = min_size * sqrt(ar) / 2.; bh = min_size / sqrt(ar) / 2.; } T xmin = (cx - bw) / im_width; T ymin = (cy - bh) / im_height; T xmax = (cx + bw) / im_width; T ymax = (cy + bh) / im_height; out[i * 4] = is_clip ? clip<T>(xmin) : xmin; out[i * 4 + 1] = is_clip ? clip<T>(ymin) : ymin; out[i * 4 + 2] = is_clip ? clip<T>(xmax) : xmax; out[i * 4 + 3] = is_clip ? 
clip<T>(ymax) : ymax; } } template <typename T> __global__ void SetVariance(T* out, const T* var, const int vnum, const int num) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += blockDim.x * gridDim.x) { out[i] = var[i % vnum]; } } template <typename T> class PriorBoxOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<paddle::framework::Tensor>("Input"); auto* image = ctx.Input<paddle::framework::Tensor>("Image"); auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes"); auto* vars = ctx.Output<paddle::framework::Tensor>("Variances"); auto min_sizes = ctx.Attr<std::vector<float>>("min_sizes"); auto max_sizes = ctx.Attr<std::vector<float>>("max_sizes"); auto input_aspect_ratio = ctx.Attr<std::vector<float>>("aspect_ratios"); auto variances = ctx.Attr<std::vector<float>>("variances"); auto flip = ctx.Attr<bool>("flip"); auto clip = ctx.Attr<bool>("clip"); std::vector<float> aspect_ratios; ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios); T step_w = static_cast<T>(ctx.Attr<float>("step_w")); T step_h = static_cast<T>(ctx.Attr<float>("step_h")); T offset = static_cast<T>(ctx.Attr<float>("offset")); auto im_width = image->dims()[3]; auto im_height = image->dims()[2]; auto width = input->dims()[3]; auto height = input->dims()[2]; T step_width, step_height; if (step_w == 0 || step_h == 0) { step_width = static_cast<T>(im_width) / width; step_height = static_cast<T>(im_height) / height; } else { step_width = step_w; step_height = step_h; } int num_priors = aspect_ratios.size() * min_sizes.size(); if (max_sizes.size() > 0) { num_priors += max_sizes.size(); } int min_num = static_cast<int>(min_sizes.size()); int box_num = width * height * num_priors; int block = 512; int grid = (box_num + block - 1) / block; auto stream = ctx.template device_context<platform::CUDADeviceContext>().stream(); boxes->mutable_data<T>(ctx.GetPlace()); vars->mutable_data<T>(ctx.GetPlace()); framework::Tensor r; framework::TensorFromVector(aspect_ratios, ctx.device_context(), &r); framework::Tensor min; framework::TensorFromVector(min_sizes, ctx.device_context(), &min); T* max_data = nullptr; framework::Tensor max; if (max_sizes.size() > 0) { framework::TensorFromVector(max_sizes, ctx.device_context(), &max); max_data = max.data<T>(); } hipLaunchKernelGGL(( GenPriorBox<T>), dim3(grid), dim3(block), 0, stream, boxes->data<T>(), r.data<T>(), height, width, im_height, im_width, aspect_ratios.size(), offset, step_width, step_height, min.data<T>(), max_data, min_num, clip); framework::Tensor v; framework::TensorFromVector(variances, ctx.device_context(), &v); grid = (box_num * 4 + block - 1) / block; hipLaunchKernelGGL(( SetVariance<T>), dim3(grid), dim3(block), 0, stream, vars->data<T>(), v.data<T>(), variances.size(), box_num * 4); } }; // namespace operators } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(prior_box, ops::PriorBoxOpCUDAKernel<float>, ops::PriorBoxOpCUDAKernel<double>);
ad2c853c5de5e34e88eeb327c0c9744c479b7f10.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/prior_box_op.h" namespace paddle { namespace operators { template <typename T> __device__ inline T clip(T in) { return min(max(in, 0.), 1.); } template <typename T> __global__ void GenPriorBox(T* out, const T* aspect_ratios, const int height, const int width, const int im_height, const int im_width, const int as_num, const T offset, const T step_width, const T step_height, const T* min_sizes, const T* max_sizes, const int min_num, bool is_clip) { int num_priors = max_sizes ? as_num * min_num + min_num : as_num * min_num; int box_num = height * width * num_priors; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < box_num; i += blockDim.x * gridDim.x) { int h = i / (num_priors * width); int w = (i / num_priors) % width; int p = i % num_priors; int m = max_sizes ? p / (as_num + 1) : p / as_num; T cx = (w + offset) * step_width; T cy = (h + offset) * step_height; T bw, bh; T min_size = min_sizes[m]; if (max_sizes) { int s = p % (as_num + 1); if (s < as_num) { T ar = aspect_ratios[s]; bw = min_size * sqrt(ar) / 2.; bh = min_size / sqrt(ar) / 2.; } else { T max_size = max_sizes[m]; bw = sqrt(min_size * max_size) / 2.; bh = bw; } } else { int s = p % as_num; T ar = aspect_ratios[s]; bw = min_size * sqrt(ar) / 2.; bh = min_size / sqrt(ar) / 2.; } T xmin = (cx - bw) / im_width; T ymin = (cy - bh) / im_height; T xmax = (cx + bw) / im_width; T ymax = (cy + bh) / im_height; out[i * 4] = is_clip ? clip<T>(xmin) : xmin; out[i * 4 + 1] = is_clip ? clip<T>(ymin) : ymin; out[i * 4 + 2] = is_clip ? clip<T>(xmax) : xmax; out[i * 4 + 3] = is_clip ? 
clip<T>(ymax) : ymax; } } template <typename T> __global__ void SetVariance(T* out, const T* var, const int vnum, const int num) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += blockDim.x * gridDim.x) { out[i] = var[i % vnum]; } } template <typename T> class PriorBoxOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<paddle::framework::Tensor>("Input"); auto* image = ctx.Input<paddle::framework::Tensor>("Image"); auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes"); auto* vars = ctx.Output<paddle::framework::Tensor>("Variances"); auto min_sizes = ctx.Attr<std::vector<float>>("min_sizes"); auto max_sizes = ctx.Attr<std::vector<float>>("max_sizes"); auto input_aspect_ratio = ctx.Attr<std::vector<float>>("aspect_ratios"); auto variances = ctx.Attr<std::vector<float>>("variances"); auto flip = ctx.Attr<bool>("flip"); auto clip = ctx.Attr<bool>("clip"); std::vector<float> aspect_ratios; ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios); T step_w = static_cast<T>(ctx.Attr<float>("step_w")); T step_h = static_cast<T>(ctx.Attr<float>("step_h")); T offset = static_cast<T>(ctx.Attr<float>("offset")); auto im_width = image->dims()[3]; auto im_height = image->dims()[2]; auto width = input->dims()[3]; auto height = input->dims()[2]; T step_width, step_height; if (step_w == 0 || step_h == 0) { step_width = static_cast<T>(im_width) / width; step_height = static_cast<T>(im_height) / height; } else { step_width = step_w; step_height = step_h; } int num_priors = aspect_ratios.size() * min_sizes.size(); if (max_sizes.size() > 0) { num_priors += max_sizes.size(); } int min_num = static_cast<int>(min_sizes.size()); int box_num = width * height * num_priors; int block = 512; int grid = (box_num + block - 1) / block; auto stream = ctx.template device_context<platform::CUDADeviceContext>().stream(); boxes->mutable_data<T>(ctx.GetPlace()); vars->mutable_data<T>(ctx.GetPlace()); framework::Tensor r; framework::TensorFromVector(aspect_ratios, ctx.device_context(), &r); framework::Tensor min; framework::TensorFromVector(min_sizes, ctx.device_context(), &min); T* max_data = nullptr; framework::Tensor max; if (max_sizes.size() > 0) { framework::TensorFromVector(max_sizes, ctx.device_context(), &max); max_data = max.data<T>(); } GenPriorBox<T><<<grid, block, 0, stream>>>( boxes->data<T>(), r.data<T>(), height, width, im_height, im_width, aspect_ratios.size(), offset, step_width, step_height, min.data<T>(), max_data, min_num, clip); framework::Tensor v; framework::TensorFromVector(variances, ctx.device_context(), &v); grid = (box_num * 4 + block - 1) / block; SetVariance<T><<<grid, block, 0, stream>>>(vars->data<T>(), v.data<T>(), variances.size(), box_num * 4); } }; // namespace operators } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(prior_box, ops::PriorBoxOpCUDAKernel<float>, ops::PriorBoxOpCUDAKernel<double>);
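// The kernel above sizes each spatial location at as_num*min_num + min_num
// boxes when max_sizes is present, while the host code adds max_sizes.size();
// the two agree only because max_sizes is expected to be empty or the same
// length as min_sizes. A tiny standalone check of that relationship (the
// helper name and the upstream-validation assumption are illustrative):
#include <cstddef>
#include <cassert>

static int priorBoxCount(std::size_t num_aspect_ratios,  // after ExpandAspectRatios
                         std::size_t num_min_sizes,
                         std::size_t num_max_sizes)
{
    assert(num_max_sizes == 0 || num_max_sizes == num_min_sizes);
    int num_priors = static_cast<int>(num_aspect_ratios * num_min_sizes);
    if (num_max_sizes > 0)
        num_priors += static_cast<int>(num_max_sizes);    // one extra square box per min_size
    return num_priors;                                    // boxes per spatial location (h, w)
}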
224d36cb2d71c10dfd07128261c1f23ff41a6800.hip
// !!! This is a file automatically generated by hipify!!! /* Defines the matrix operations for sequential dense with CUDA */ #include <petscpkg_version.h> #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <../src/mat/impls/dense/seq/dense.h> /*I "petscmat.h" I*/ #include <petsccublas.h> /* cublas definitions are here */ #include <petsc/private/cudavecimpl.h> #if defined(PETSC_USE_COMPLEX) #if defined(PETSC_USE_REAL_SINGLE) #define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnCpotrf((a),(b),(c),(hipComplex*)(d),(e),(hipComplex*)(f),(g),(h)) #define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnCpotrf_bufferSize((a),(b),(c),(hipComplex*)(d),(e),(f)) #define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnCpotrs((a),(b),(c),(d),(hipComplex*)(e),(f),(hipComplex*)(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnCpotri((a),(b),(c),(hipComplex*)(d),(e),(hipComplex*)(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnCpotri_bufferSize((a),(b),(c),(hipComplex*)(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) hipsolverDnCsytrf((a),(b),(c),(hipComplex*)(d),(e),(f),(hipComplex*)(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnCsytrf_bufferSize((a),(b),(hipComplex*)(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnCgetrf((a),(b),(c),(hipComplex*)(d),(e),(hipComplex*)(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnCgetrf_bufferSize((a),(b),(c),(hipComplex*)(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnCgetrs((a),(b),(c),(d),(hipComplex*)(e),(f),(g),(hipComplex*)(h),(i),(j)) #else /* complex double */ #define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnZpotrf((a),(b),(c),(hipDoubleComplex*)(d),(e),(hipDoubleComplex*)(f),(g),(h)) #define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnZpotrf_bufferSize((a),(b),(c),(hipDoubleComplex*)(d),(e),(f)) #define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnZpotrs((a),(b),(c),(d),(hipDoubleComplex*)(e),(f),(hipDoubleComplex*)(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnZpotri((a),(b),(c),(hipDoubleComplex*)(d),(e),(hipDoubleComplex*)(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnZpotri_bufferSize((a),(b),(c),(hipDoubleComplex*)(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) hipsolverDnZsytrf((a),(b),(c),(hipDoubleComplex*)(d),(e),(f),(hipDoubleComplex*)(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnZsytrf_bufferSize((a),(b),(hipDoubleComplex*)(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnZgetrf((a),(b),(c),(hipDoubleComplex*)(d),(e),(hipDoubleComplex*)(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnZgetrf_bufferSize((a),(b),(c),(hipDoubleComplex*)(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnZgetrs((a),(b),(c),(d),(hipDoubleComplex*)(e),(f),(g),(hipDoubleComplex*)(h),(i),(j)) #endif #else /* real single */ #if defined(PETSC_USE_REAL_SINGLE) #define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnSpotrf((a),(b),(c),(d),(e),(f),(g),(h)) #define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnSpotrf_bufferSize((a),(b),(c),(d),(e),(f)) #define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnSpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnSpotri((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnSpotri_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) 
hipsolverDnSsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnSsytrf_bufferSize((a),(b),(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnSgetrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnSgetrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnSgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j)) #else /* real double */ #define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnDpotrf((a),(b),(c),(d),(e),(f),(g),(h)) #define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnDpotrf_bufferSize((a),(b),(c),(d),(e),(f)) #define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnDpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnDpotri((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnDpotri_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) hipsolverDnDsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnDsytrf_bufferSize((a),(b),(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnDgetrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnDgetrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnDgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j)) #endif #endif typedef struct { PetscScalar *d_v; /* pointer to the matrix on the GPU */ PetscBool user_alloc; PetscScalar *unplacedarray; /* if one called MatCUDADensePlaceArray(), this is where it stashed the original */ PetscBool unplaced_user_alloc; /* factorization support */ int *d_fact_ipiv; /* device pivots */ PetscScalar *d_fact_work; /* device workspace */ int fact_lwork; int *d_fact_info; /* device info */ /* workspace */ Vec workvec; } Mat_SeqDenseCUDA; PetscErrorCode MatSeqDenseCUDASetPreallocation(Mat A, PetscScalar *d_data) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscBool iscuda; hipError_t cerr; PetscFunctionBegin; ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) PetscFunctionReturn(0); /* it may happen CPU preallocation has not been performed */ ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr); if (cA->lda <= 0) cA->lda = A->rmap->n; if (!dA->user_alloc) { cerr = hipFree(dA->d_v);CHKERRCUDA(cerr); } if (!d_data) { /* petsc-allocated storage */ ierr = PetscIntMultError(cA->lda,A->cmap->n,NULL);CHKERRQ(ierr); cerr = hipMalloc((void**)&dA->d_v,cA->lda*A->cmap->n*sizeof(PetscScalar));CHKERRCUDA(cerr); dA->user_alloc = PETSC_FALSE; } else { /* user-allocated storage */ dA->d_v = d_data; dA->user_alloc = PETSC_TRUE; A->offloadmask = PETSC_OFFLOAD_GPU; } A->preallocated = PETSC_TRUE; A->assembled = PETSC_TRUE; PetscFunctionReturn(0); } PetscErrorCode MatSeqDenseCUDACopyFromGPU(Mat A) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); ierr = PetscInfo3(A,"%s matrix %d x %d\n",A->offloadmask == PETSC_OFFLOAD_GPU ? "Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr); if (A->offloadmask == PETSC_OFFLOAD_GPU) { if (!cA->v) { /* MatCreateSeqDenseCUDA may not allocate CPU memory. 
Allocate if needed */ ierr = MatSeqDenseSetPreallocation(A,NULL);CHKERRQ(ierr); } ierr = PetscLogEventBegin(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr); if (cA->lda > A->rmap->n) { PetscInt j,m = A->rmap->n; for (j=0; j<A->cmap->n; j++) { /* TODO: it can be done better */ cerr = hipMemcpy(cA->v + j*cA->lda,dA->d_v + j*cA->lda,m*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } } else { cerr = hipMemcpy(cA->v,dA->d_v,cA->lda*sizeof(PetscScalar)*A->cmap->n,hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } ierr = PetscLogGpuToCpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } PetscErrorCode MatSeqDenseCUDACopyToGPU(Mat A) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscBool copy; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); if (A->boundtocpu) PetscFunctionReturn(0); copy = (PetscBool)(A->offloadmask == PETSC_OFFLOAD_CPU || A->offloadmask == PETSC_OFFLOAD_UNALLOCATED); ierr = PetscInfo3(A,"%s matrix %d x %d\n",copy ? "Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr); if (copy) { if (!dA->d_v) { /* Allocate GPU memory if not present */ ierr = MatSeqDenseCUDASetPreallocation(A,NULL);CHKERRQ(ierr); } ierr = PetscLogEventBegin(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr); if (cA->lda > A->rmap->n) { PetscInt j,m = A->rmap->n; for (j=0; j<A->cmap->n; j++) { /* TODO: it can be done better */ cerr = hipMemcpy(dA->d_v + j*cA->lda,cA->v + j*cA->lda,m*sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr); } } else { cerr = hipMemcpy(dA->d_v,cA->v,cA->lda*sizeof(PetscScalar)*A->cmap->n,hipMemcpyHostToDevice);CHKERRCUDA(cerr); } ierr = PetscLogCpuToGpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } static PetscErrorCode MatCopy_SeqDenseCUDA(Mat A,Mat B,MatStructure str) { Mat_SeqDense *a = (Mat_SeqDense*)A->data,*b = (Mat_SeqDense*)B->data; PetscErrorCode ierr; const PetscScalar *va; PetscScalar *vb; PetscInt lda1=a->lda,lda2=b->lda, m=A->rmap->n,n=A->cmap->n, j; hipError_t cerr; PetscFunctionBegin; /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. 
*/ if (A->ops->copy != B->ops->copy) { ierr = MatCopy_Basic(A,B,str);CHKERRQ(ierr); PetscFunctionReturn(0); } if (m != B->rmap->n || n != B->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"size(B) != size(A)"); ierr = MatDenseCUDAGetArrayRead(A,&va);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayWrite(B,&vb);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (lda1>m || lda2>m) { for (j=0; j<n; j++) { cerr = hipMemcpy(vb+j*lda2,va+j*lda1,m*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(cerr); } } else { cerr = hipMemcpy(vb,va,m*(n*sizeof(PetscScalar)),hipMemcpyDeviceToDevice);CHKERRCUDA(cerr); } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(B,&vb);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&va);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAPlaceArray_SeqDenseCUDA(Mat A, const PetscScalar *a) { Mat_SeqDense *aa = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (aa->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (aa->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first"); if (aa->v) { ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); } dA->unplacedarray = dA->d_v; dA->unplaced_user_alloc = dA->user_alloc; dA->d_v = (PetscScalar*)a; dA->user_alloc = PETSC_TRUE; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAResetArray_SeqDenseCUDA(Mat A) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (a->v) { ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); } dA->d_v = dA->unplacedarray; dA->user_alloc = dA->unplaced_user_alloc; dA->unplacedarray = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAReplaceArray_SeqDenseCUDA(Mat A, const PetscScalar *a) { Mat_SeqDense *aa = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; hipError_t cerr; PetscFunctionBegin; if (aa->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (aa->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first"); if (!dA->user_alloc) { cerr = hipFree(dA->d_v);CHKERRCUDA(cerr); } dA->d_v = (PetscScalar*)a; dA->user_alloc = PETSC_FALSE; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAGetArrayWrite_SeqDenseCUDA(Mat A, PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (!dA->d_v) { ierr = MatSeqDenseCUDASetPreallocation(A,NULL);CHKERRQ(ierr); } *a = dA->d_v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDARestoreArrayWrite_SeqDenseCUDA(Mat A, PetscScalar **a) { PetscFunctionBegin; *a = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAGetArrayRead_SeqDenseCUDA(Mat A, const PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; 
ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); *a = dA->d_v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDARestoreArrayRead_SeqDenseCUDA(Mat A, const PetscScalar **a) { PetscFunctionBegin; *a = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAGetArray_SeqDenseCUDA(Mat A, PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); *a = dA->d_v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDARestoreArray_SeqDenseCUDA(Mat A, PetscScalar **a) { PetscFunctionBegin; *a = NULL; PetscFunctionReturn(0); } PETSC_EXTERN PetscErrorCode MatSeqDenseCUDAInvertFactors_Private(Mat A) { #if PETSC_PKG_CUDA_VERSION_GE(10,1,0) Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; PetscErrorCode ierr; hipError_t ccer; cusolverStatus_t cerr; hipsolverDnHandle_t handle; int n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_LU) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDngetri not implemented"); else if (A->factortype == MAT_FACTOR_CHOLESKY) { if (!dA->d_fact_ipiv) { /* spd */ int il; ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); cerr = cusolverDnXpotri_bufferSize(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,&il);CHKERRCUSOLVER(cerr); if (il > dA->fact_lwork) { dA->fact_lwork = il; ccer = hipFree(dA->d_fact_work);CHKERRCUDA(ccer); ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXpotri(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ccer = WaitForCUDA();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); /* TODO (write cuda kernel) */ ierr = MatSeqDenseSymmetrize_Private(A,PETSC_TRUE);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytri not implemented"); } #if defined(PETSC_USE_DEBUG) ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: leading minor of order %d is zero",info); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr); A->ops->solve = NULL; A->ops->solvetranspose = NULL; A->ops->matsolve = NULL; A->factortype = MAT_FACTOR_NONE; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); #else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Upgrade to CUDA version 10.1.0 or higher"); #endif } static PetscErrorCode MatMatSolve_SeqDenseCUDA(Mat A,Mat B,Mat X) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDense *x = (Mat_SeqDense*)X->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; const PetscScalar *da; PetscScalar *dx; hipsolverDnHandle_t handle; PetscBool iscuda; int nrhs,n,lda,ldx; #if defined(PETSC_USE_DEBUG) int info; #endif hipError_t ccer; cusolverStatus_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); 
if (!dA->d_fact_work) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); ierr = PetscObjectTypeCompareAny((PetscObject)X,&iscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr); if (X != B) { ierr = MatCopy(B,X,SAME_NONZERO_PATTERN);CHKERRQ(ierr); } ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); /* MatMatSolve does not have a dispatching mechanism, we may end up with a MATSEQDENSE here */ ierr = PetscObjectTypeCompare((PetscObject)X,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) { ierr = MatConvert(X,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&X);CHKERRQ(ierr); } ierr = MatDenseCUDAGetArray(X,&dx);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(X->cmap->n,&nrhs);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscMPIIntCast(x->lda,&ldx);CHKERRQ(ierr); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_LU) { ierr = PetscInfo2(A,"LU solve %d x %d on backend\n",n,n);CHKERRQ(ierr); cerr = cusolverDnXgetrs(handle,HIPBLAS_OP_N,n,nrhs,da,lda,dA->d_fact_ipiv,dx,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else if (A->factortype == MAT_FACTOR_CHOLESKY) { ierr = PetscInfo2(A,"Cholesky solve %d x %d on backend\n",n,n);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { /* spd */ /* ========= Program hit hipErrorNotReady (error 34) due to "device not ready" on CUDA API call to hipEventQuery. */ cerr = hipsolverDnXpotrs(handle,HIPBLAS_FILL_MODE_LOWER,n,nrhs,da,lda,dx,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytrs not implemented"); } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unknown factor type %d",A->factortype); ccer = WaitForCUDA();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(X,&dx);CHKERRQ(ierr); if (!iscuda) { ierr = MatConvert(X,MATSEQDENSE,MAT_INPLACE_MATRIX,&X);CHKERRQ(ierr); } #if defined(PETSC_USE_DEBUG) ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif ierr = PetscLogGpuFlops(nrhs*(2.0*n*n - n));CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_Private(Mat A,Vec xx,Vec yy,PetscBool trans) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; const PetscScalar *da; PetscScalar *y; hipsolverDnHandle_t handle; int one = 1,n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif hipError_t ccer; cusolverStatus_t cerr; PetscBool iscuda; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); if (!dA->d_fact_work) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr); /* MatSolve does not have a dispatching mechanism, we may end up with a VECSTANDARD here */ ierr = PetscObjectTypeCompareAny((PetscObject)yy,&iscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr); if (iscuda) { ierr = VecCopy(xx,yy);CHKERRQ(ierr); ierr = VecCUDAGetArray(yy,&y);CHKERRQ(ierr); } else { if (!dA->workvec) { ierr = MatCreateVecs(A,&dA->workvec,NULL);CHKERRQ(ierr); } ierr = 
VecCopy(xx,dA->workvec);CHKERRQ(ierr); ierr = VecCUDAGetArray(dA->workvec,&y);CHKERRQ(ierr); } ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_LU) { ierr = PetscInfo2(A,"LU solve %d x %d on backend\n",n,n);CHKERRQ(ierr); cerr = cusolverDnXgetrs(handle,trans ? HIPBLAS_OP_T : HIPBLAS_OP_N,n,one,da,lda,dA->d_fact_ipiv,y,n,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else if (A->factortype == MAT_FACTOR_CHOLESKY) { ierr = PetscInfo2(A,"Cholesky solve %d x %d on backend\n",n,n);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { /* spd */ /* ========= Program hit hipErrorNotReady (error 34) due to "device not ready" on CUDA API call to hipEventQuery. */ cerr = hipsolverDnXpotrs(handle,HIPBLAS_FILL_MODE_LOWER,n,one,da,lda,y,n,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytrs not implemented"); } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unknown factor type %d",A->factortype); ccer = WaitForCUDA();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); if (iscuda) { ierr = VecCUDARestoreArray(yy,&y);CHKERRQ(ierr); } else { ierr = VecCUDARestoreArray(dA->workvec,&y);CHKERRQ(ierr); ierr = VecCopy(dA->workvec,yy);CHKERRQ(ierr); } ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif ierr = PetscLogGpuFlops(2.0*n*n - n);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Private(A,xx,yy,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolveTranspose_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Private(A,xx,yy,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatLUFactor_SeqDenseCUDA(Mat A,IS rperm,IS cperm,const MatFactorInfo *factinfo) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; int m,n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cusolverStatus_t cerr; hipsolverDnHandle_t handle; hipError_t ccer; PetscErrorCode ierr; PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscInfo2(A,"LU factor %d x %d on backend\n",m,n);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { ccer = hipMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer); } if (!dA->fact_lwork) { cerr = cusolverDnXgetrf_bufferSize(handle,m,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = hipMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = 
cusolverDnXgetrf(handle,m,n,da,lda,dA->d_fact_work,dA->d_fact_ipiv,dA->d_fact_info);CHKERRCUSOLVER(cerr); ccer = WaitForCUDA();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif A->factortype = MAT_FACTOR_LU; ierr = PetscLogGpuFlops(2.0*n*n*m/3.0);CHKERRQ(ierr); A->ops->solve = MatSolve_SeqDenseCUDA; A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA; A->ops->matsolve = MatMatSolve_SeqDenseCUDA; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatCholeskyFactor_SeqDenseCUDA(Mat A,IS perm,const MatFactorInfo *factinfo) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; int n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cusolverStatus_t cerr; hipsolverDnHandle_t handle; hipError_t ccer; PetscErrorCode ierr; PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr); ierr = PetscInfo2(A,"Cholesky factor %d x %d on backend\n",n,n);CHKERRQ(ierr); if (A->spd) { ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); if (!dA->fact_lwork) { cerr = hipsolverDnXpotrf_bufferSize(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = hipMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = hipsolverDnXpotrf(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ccer = WaitForCUDA();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif A->factortype = MAT_FACTOR_CHOLESKY; ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cusolverDnsytrs unavailable. 
Use MAT_FACTOR_LU"); #if 0 /* at the time of writing this interface (cuda 10.0), cusolverDn does not implement *sytrs and *hetr* routines The code below should work, and it can be activated when *sytrs routines will be available */ if (!dA->d_fact_ipiv) { ccer = hipMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer); } if (!dA->fact_lwork) { cerr = cusolverDnXsytrf_bufferSize(handle,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = hipMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXsytrf(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_ipiv,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); #endif A->ops->solve = MatSolve_SeqDenseCUDA; A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA; A->ops->matsolve = MatMatSolve_SeqDenseCUDA; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } /* GEMM kernel: C = op(A)*op(B), tA, tB flag transposition */ PETSC_INTERN PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat A,Mat B,Mat C,PetscBool tA,PetscBool tB) { const PetscScalar *da,*db; PetscScalar *dc; PetscScalar one=1.0,zero=0.0; int m,n,k; PetscInt alda,blda,clda; PetscErrorCode ierr; hipblasHandle_t cublasv2handle; PetscBool Aiscuda,Biscuda; hipblasStatus_t berr; hipError_t cerr; PetscFunctionBegin; /* we may end up with SEQDENSE as one of the arguments */ ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&Aiscuda);CHKERRQ(ierr); ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&Biscuda);CHKERRQ(ierr); if (!Aiscuda) { ierr = MatConvert(A,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } if (!Biscuda) { ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); } ierr = PetscMPIIntCast(C->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(C->cmap->n,&n);CHKERRQ(ierr); if (tA) { ierr = PetscMPIIntCast(A->rmap->n,&k);CHKERRQ(ierr); } else { ierr = PetscMPIIntCast(A->cmap->n,&k);CHKERRQ(ierr); } if (!m || !n || !k) PetscFunctionReturn(0); ierr = PetscInfo3(C,"Matrix-Matrix product %d x %d x %d on backend\n",m,k,n);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(B,&db);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayWrite(C,&dc);CHKERRQ(ierr); ierr = MatDenseGetLDA(A,&alda);CHKERRQ(ierr); ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr); ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); berr = cublasXgemm(cublasv2handle,tA ? HIPBLAS_OP_T : HIPBLAS_OP_N,tB ? 
HIPBLAS_OP_T : HIPBLAS_OP_N, m,n,k,&one,da,alda,db,blda,&zero,dc,clda);CHKERRCUBLAS(berr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(1.0*m*n*k + 1.0*m*n*(k-1));CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(B,&db);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(C,&dc);CHKERRQ(ierr); if (!Aiscuda) { ierr = MatConvert(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } if (!Biscuda) { ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatProductSetFromOptions_SeqDenseCUDA(Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatProductSetFromOptions_SeqDense(C);CHKERRQ(ierr); PetscFunctionReturn(0); } /* zz = op(A)*xx + yy if yy == NULL, only MatMult */ static PetscErrorCode MatMultAdd_SeqDenseCUDA_Private(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; const PetscScalar *xarray,*da; PetscScalar *zarray; PetscScalar one=1.0,zero=0.0; int m, n, lda; /* Use PetscMPIInt as it is typedef'ed to int */ hipblasHandle_t cublasv2handle; hipblasStatus_t berr; PetscErrorCode ierr; PetscFunctionBegin; if (yy && yy != zz) { /* mult add */ ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); } if (!A->rmap->n || !A->cmap->n) { if (!yy) { /* mult only */ ierr = VecSet_SeqCUDA(zz,0.0);CHKERRQ(ierr); } PetscFunctionReturn(0); } ierr = PetscInfo2(A,"Matrix-vector product %d x %d on backend\n",A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(mat->lda,&lda);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); berr = cublasXgemv(cublasv2handle,trans ? HIPBLAS_OP_T : HIPBLAS_OP_N, m,n,&one,da,lda,xarray,1,(yy ? &one : &zero),zarray,1);CHKERRCUBLAS(berr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*A->rmap->n*A->cmap->n - (yy ? 
0 : A->rmap->n));CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTransposeAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMult_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTranspose_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetArrayRead_SeqDenseCUDA(Mat A,const PetscScalar **array) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); *array = mat->v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetArrayWrite_SeqDenseCUDA(Mat A,PetscScalar **array) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!mat->v) { /* MatCreateSeqDenseCUDA may not allocate CPU memory. Allocate if needed */ ierr = MatSeqDenseSetPreallocation(A,NULL);CHKERRQ(ierr); } *array = mat->v; A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetArray_SeqDenseCUDA(Mat A,PetscScalar **array) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); *array = mat->v; A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } PetscErrorCode MatScale_SeqDenseCUDA(Mat Y,PetscScalar alpha) { Mat_SeqDense *y = (Mat_SeqDense*)Y->data; PetscScalar *dy; int j,N,m,lday,one = 1; hipblasHandle_t cublasv2handle; hipblasStatus_t berr; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArray(Y,&dy);CHKERRQ(ierr); ierr = PetscMPIIntCast(Y->rmap->n*Y->cmap->n,&N);CHKERRQ(ierr); ierr = PetscMPIIntCast(Y->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(y->lda,&lday);CHKERRQ(ierr); ierr = PetscInfo2(Y,"Performing Scale %d x %d on backend\n",Y->rmap->n,Y->cmap->n);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (lday>m) { for (j=0; j<Y->cmap->n; j++) { berr = cublasXscal(cublasv2handle,m,&alpha,dy+lday*j,one);CHKERRCUBLAS(berr); } } else { berr = cublasXscal(cublasv2handle,N,&alpha,dy,one);CHKERRCUBLAS(berr); } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(N);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(Y,&dy);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatAXPY_SeqDenseCUDA(Mat Y,PetscScalar alpha,Mat X,MatStructure str) { Mat_SeqDense *x = (Mat_SeqDense*)X->data; Mat_SeqDense *y = (Mat_SeqDense*)Y->data; const PetscScalar *dx; PetscScalar *dy; int j,N,m,ldax,lday,one = 1; hipblasHandle_t cublasv2handle; hipblasStatus_t berr; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; if (!X->rmap->n || !X->cmap->n) PetscFunctionReturn(0); 
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(X,&dx);CHKERRQ(ierr); if (alpha != 0.0) { ierr = MatDenseCUDAGetArray(Y,&dy);CHKERRQ(ierr); } else { ierr = MatDenseCUDAGetArrayWrite(Y,&dy);CHKERRQ(ierr); } ierr = PetscMPIIntCast(X->rmap->n*X->cmap->n,&N);CHKERRQ(ierr); ierr = PetscMPIIntCast(X->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(x->lda,&ldax);CHKERRQ(ierr); ierr = PetscMPIIntCast(y->lda,&lday);CHKERRQ(ierr); ierr = PetscInfo2(Y,"Performing AXPY %d x %d on backend\n",Y->rmap->n,Y->cmap->n);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (ldax>m || lday>m) { for (j=0; j<X->cmap->n; j++) { berr = cublasXaxpy(cublasv2handle,m,&alpha,dx+j*ldax,one,dy+j*lday,one);CHKERRCUBLAS(berr); } } else { berr = cublasXaxpy(cublasv2handle,N,&alpha,dx,one,dy,one);CHKERRCUBLAS(berr); } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(PetscMax(2.*N-1,0));CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(X,&dx);CHKERRQ(ierr); if (alpha != 0.0) { ierr = MatDenseCUDARestoreArray(Y,&dy);CHKERRQ(ierr); } else { ierr = MatDenseCUDARestoreArrayWrite(Y,&dy);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatReset_SeqDenseCUDA(Mat A) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; hipError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (dA) { if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first"); if (!dA->user_alloc) { cerr = hipFree(dA->d_v);CHKERRCUDA(cerr); } cerr = hipFree(dA->d_fact_ipiv);CHKERRCUDA(cerr); cerr = hipFree(dA->d_fact_info);CHKERRCUDA(cerr); cerr = hipFree(dA->d_fact_work);CHKERRCUDA(cerr); ierr = VecDestroy(&dA->workvec);CHKERRQ(ierr); } ierr = PetscFree(A->spptr);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatDestroy_SeqDenseCUDA(Mat A) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; /* prevent to copy back data if we own the data pointer */ if (!a->user_alloc) { A->offloadmask = PETSC_OFFLOAD_CPU; } ierr = MatConvert_SeqDenseCUDA_SeqDense(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); ierr = MatDestroy_SeqDense(A);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatDuplicate_SeqDenseCUDA(Mat A,MatDuplicateOption cpvalues,Mat *B) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); ierr = MatSetSizes(*B,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = MatSetType(*B,((PetscObject)A)->type_name);CHKERRQ(ierr); ierr = MatDuplicateNoCreate_SeqDense(*B,A,cpvalues);CHKERRQ(ierr); if (cpvalues == MAT_COPY_VALUES && A->offloadmask != PETSC_OFFLOAD_CPU) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; const PetscScalar *da; PetscScalar *db; hipError_t cerr; PetscInt ldb; ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayWrite(*B,&db);CHKERRQ(ierr); ierr = MatDenseGetLDA(*B,&ldb);CHKERRQ(ierr); if (a->lda > A->rmap->n || ldb > A->rmap->n) { PetscInt j,m = A->rmap->n; for (j=0; j<A->cmap->n; j++) { /* it can be done better */ cerr = hipMemcpy(db+j*ldb,da+j*a->lda,m*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(cerr); } } else { cerr = hipMemcpy(db,da,(sizeof(PetscScalar)*A->cmap->n)*A->rmap->n,hipMemcpyDeviceToDevice);CHKERRCUDA(cerr); } ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(*B,&db);CHKERRQ(ierr); (*B)->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } 
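/*
   Editorial sketch (not part of the upstream source): the per-column hipMemcpy
   loops used above in the copy/duplicate paths when the leading dimension
   exceeds the local row count (see the "it can be done better" note) could, in
   principle, be collapsed into one strided 2-D transfer. A minimal sketch,
   assuming column-major storage of an m x n block with device pointers da/db
   and leading dimensions lda/ldb; the helper name is hypothetical and the
   block is guarded out of compilation.
*/
#if 0
static inline hipError_t MatDenseCUDACopyStrided_Sketch(PetscScalar *db,PetscInt ldb,const PetscScalar *da,PetscInt lda,PetscInt m,PetscInt n)
{
  /* each "row" of the 2-D copy is one contiguous column of m scalars; pitches and width are in bytes */
  return hipMemcpy2D(db,(size_t)ldb*sizeof(PetscScalar),
                     da,(size_t)lda*sizeof(PetscScalar),
                     (size_t)m*sizeof(PetscScalar),(size_t)n,
                     hipMemcpyDeviceToDevice);
}
#endif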
#include <petsc/private/vecimpl.h> static PetscErrorCode MatGetColumnVector_SeqDenseCUDA(Mat A,Vec v,PetscInt col) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscScalar *x; PetscBool viscuda; hipError_t cerr; PetscFunctionBegin; ierr = PetscObjectTypeCompareAny((PetscObject)v,&viscuda,VECSEQCUDA,VECMPICUDA,VECCUDA,"");CHKERRQ(ierr); if (viscuda && !v->boundtocpu) { /* update device data */ ierr = VecCUDAGetArrayWrite(v,&x);CHKERRQ(ierr); if (A->offloadmask & PETSC_OFFLOAD_GPU) { cerr = hipMemcpy(x,dA->d_v + col*a->lda,A->rmap->n*sizeof(PetscScalar),hipMemcpyHostToHost);CHKERRCUDA(cerr); } else { cerr = hipMemcpy(x,a->v + col*a->lda,A->rmap->n*sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr); } ierr = VecCUDARestoreArrayWrite(v,&x);CHKERRQ(ierr); } else { /* update host data */ ierr = VecGetArrayWrite(v,&x);CHKERRQ(ierr); if (A->offloadmask & PETSC_OFFLOAD_CPU) { ierr = PetscArraycpy(x,a->v+col*a->lda,A->rmap->n);CHKERRQ(ierr); } else { cerr = hipMemcpy(x,dA->d_v + col*a->lda,A->rmap->n*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } ierr = VecRestoreArrayWrite(v,&x);CHKERRQ(ierr); } PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatGetFactor_seqdense_cuda(Mat A,MatFactorType ftype,Mat *fact) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),fact);CHKERRQ(ierr); ierr = MatSetSizes(*fact,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = MatSetType(*fact,MATSEQDENSECUDA);CHKERRQ(ierr); if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU) { (*fact)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqDense; } else { (*fact)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqDense; } (*fact)->factortype = ftype; ierr = PetscFree((*fact)->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&(*fact)->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_LU]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_ILU]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_CHOLESKY]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_ICC]);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetColumnVec_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); ierr = MatDenseCUDAGetArray(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); if (!a->cvec) { /* we pass the data of A, to prevent allocating needless GPU memory the first time VecCUDAPlaceArray is called */ ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,a->ptrinuse,&a->cvec);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr); } a->vecinuse = col + 1; ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr); *v = a->cvec; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreColumnVec_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->vecinuse) 
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first"); if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector"); a->vecinuse = 0; ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetColumnVecRead_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); ierr = MatDenseCUDAGetArrayRead(A,&a->ptrinuse);CHKERRQ(ierr); if (!a->cvec) { /* we pass the data of A, to prevent allocating needless GPU memory the first time VecCUDAPlaceArray is called */ ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,a->ptrinuse,&a->cvec);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr); } a->vecinuse = col + 1; ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr); ierr = VecLockReadPush(a->cvec);CHKERRQ(ierr); *v = a->cvec; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreColumnVecRead_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first"); if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector"); a->vecinuse = 0; ierr = VecLockReadPop(a->cvec);CHKERRQ(ierr); ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&a->ptrinuse);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetColumnVecWrite_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); ierr = MatDenseCUDAGetArrayWrite(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); if (!a->cvec) { /* we pass the data of A, to prevent allocating needless GPU memory the first time VecCUDAPlaceArray is called */ ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,a->ptrinuse,&a->cvec);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr); } a->vecinuse = col + 1; ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr); *v = a->cvec; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreColumnVecWrite_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first"); if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector"); a->vecinuse = 0; ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetSubMatrix_SeqDenseCUDA(Mat A,PetscInt cbegin,PetscInt cend,Mat *v) { Mat_SeqDense 
*a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (a->cmat && cend-cbegin != a->cmat->cmap->N) { ierr = MatDestroy(&a->cmat);CHKERRQ(ierr); } ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); if (!a->cmat) { ierr = MatCreateDenseCUDA(PetscObjectComm((PetscObject)A),A->rmap->n,PETSC_DECIDE,A->rmap->N,cend-cbegin,dA->d_v + (size_t)cbegin * (size_t)a->lda,&a->cmat);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cmat);CHKERRQ(ierr); } else { ierr = MatDenseCUDAPlaceArray(a->cmat,dA->d_v + (size_t)cbegin * (size_t)a->lda);CHKERRQ(ierr); } ierr = MatDenseSetLDA(a->cmat,a->lda);CHKERRQ(ierr); if (a->v) { ierr = MatDensePlaceArray(a->cmat,a->v + (size_t)cbegin * (size_t)a->lda);CHKERRQ(ierr); } a->cmat->offloadmask = A->offloadmask; a->matinuse = cbegin + 1; *v = a->cmat; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreSubMatrix_SeqDenseCUDA(Mat A,Mat *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetSubMatrix() first"); if (!a->cmat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column matrix"); if (*v != a->cmat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Not the matrix obtained from MatDenseGetSubMatrix()"); a->matinuse = 0; A->offloadmask = PETSC_OFFLOAD_GPU; ierr = MatDenseCUDAResetArray(a->cmat);CHKERRQ(ierr); ierr = MatDenseResetArray(a->cmat);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseSetLDA_SeqDenseCUDA(Mat A,PetscInt lda) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscBool data; PetscFunctionBegin; data = (PetscBool)((A->rmap->n > 0 && A->cmap->n > 0) ? (dA->d_v ? 
PETSC_TRUE : PETSC_FALSE) : PETSC_FALSE); if (!dA->user_alloc && data && cA->lda!=lda) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"LDA cannot be changed after allocation of internal storage"); if (lda < A->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"LDA %D must be at least matrix dimension %D",lda,A->rmap->n); cA->lda = lda; PetscFunctionReturn(0); } static PetscErrorCode MatBindToCPU_SeqDenseCUDA(Mat A,PetscBool flg) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); A->boundtocpu = flg; if (!flg) { PetscBool iscuda; ierr = PetscObjectTypeCompare((PetscObject)a->cvec,VECSEQCUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) { ierr = VecDestroy(&a->cvec);CHKERRQ(ierr); } ierr = PetscObjectTypeCompare((PetscObject)a->cmat,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) { ierr = MatDestroy(&a->cmat);CHKERRQ(ierr); } ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C",MatDenseGetArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C",MatDenseGetArrayRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayWrite_C",MatDenseGetArrayWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetSubMatrix_C",MatDenseGetSubMatrix_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreSubMatrix_C",MatDenseRestoreSubMatrix_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseSetLDA_C",MatDenseSetLDA_SeqDenseCUDA);CHKERRQ(ierr); A->ops->duplicate = MatDuplicate_SeqDenseCUDA; A->ops->mult = MatMult_SeqDenseCUDA; A->ops->multadd = MatMultAdd_SeqDenseCUDA; A->ops->multtranspose = MatMultTranspose_SeqDenseCUDA; A->ops->multtransposeadd = MatMultTransposeAdd_SeqDenseCUDA; A->ops->matmultnumeric = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->axpy = MatAXPY_SeqDenseCUDA; A->ops->choleskyfactor = MatCholeskyFactor_SeqDenseCUDA; A->ops->lufactor = MatLUFactor_SeqDenseCUDA; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDenseCUDA; A->ops->getcolumnvector = MatGetColumnVector_SeqDenseCUDA; A->ops->scale = MatScale_SeqDenseCUDA; A->ops->copy = MatCopy_SeqDenseCUDA; } else { /* make sure we have an up-to-date copy on the CPU */ ierr = 
MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayWrite_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetSubMatrix_C",MatDenseGetSubMatrix_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreSubMatrix_C",MatDenseRestoreSubMatrix_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseSetLDA_C",MatDenseSetLDA_SeqDense);CHKERRQ(ierr); A->ops->duplicate = MatDuplicate_SeqDense; A->ops->mult = MatMult_SeqDense; A->ops->multadd = MatMultAdd_SeqDense; A->ops->multtranspose = MatMultTranspose_SeqDense; A->ops->multtransposeadd = MatMultTransposeAdd_SeqDense; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense; A->ops->matmultnumeric = MatMatMultNumeric_SeqDense_SeqDense; A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDense_SeqDense; A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDense_SeqDense; A->ops->axpy = MatAXPY_SeqDense; A->ops->choleskyfactor = MatCholeskyFactor_SeqDense; A->ops->lufactor = MatLUFactor_SeqDense; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense; A->ops->getcolumnvector = MatGetColumnVector_SeqDense; A->ops->scale = MatScale_SeqDense; A->ops->copy = MatCopy_SeqDense; } if (a->cmat) { ierr = MatBindToCPU(a->cmat,flg);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode MatConvert_SeqDenseCUDA_SeqDense(Mat M,MatType type,MatReuse reuse,Mat *newmat) { Mat B; PetscErrorCode ierr; PetscFunctionBegin; if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) { /* TODO these cases should be optimized */ ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr); PetscFunctionReturn(0); } B = *newmat; ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_TRUE);CHKERRQ(ierr); ierr = MatReset_SeqDenseCUDA(B);CHKERRQ(ierr); ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECSTANDARD,&B->defaultvectype);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayRead_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayWrite_C",NULL);CHKERRQ(ierr); ierr = 
PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayRead_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayWrite_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAPlaceArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAResetArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAReplaceArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_seqaij_seqdensecuda_C",NULL);CHKERRQ(ierr); B->ops->bindtocpu = NULL; B->ops->destroy = MatDestroy_SeqDense; B->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } PetscErrorCode MatConvert_SeqDense_SeqDenseCUDA(Mat M,MatType type,MatReuse reuse,Mat *newmat) { Mat_SeqDenseCUDA *dB; Mat B; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) { /* TODO these cases should be optimized */ ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr); PetscFunctionReturn(0); } B = *newmat; ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSECUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C", MatConvert_SeqDenseCUDA_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArray_C", MatDenseCUDAGetArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayRead_C", MatDenseCUDAGetArrayRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayWrite_C", MatDenseCUDAGetArrayWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArray_C", MatDenseCUDARestoreArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayRead_C", MatDenseCUDARestoreArrayRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayWrite_C", MatDenseCUDARestoreArrayWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAPlaceArray_C", MatDenseCUDAPlaceArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAResetArray_C", MatDenseCUDAResetArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAReplaceArray_C", MatDenseCUDAReplaceArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_seqaij_seqdensecuda_C",MatProductSetFromOptions_SeqAIJ_SeqDense);CHKERRQ(ierr); ierr = PetscNewLog(B,&dB);CHKERRQ(ierr); B->spptr = dB; B->offloadmask = PETSC_OFFLOAD_UNALLOCATED; ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_FALSE);CHKERRQ(ierr); B->ops->bindtocpu = MatBindToCPU_SeqDenseCUDA; B->ops->destroy = MatDestroy_SeqDenseCUDA; PetscFunctionReturn(0); } /*@C MatCreateSeqDenseCUDA - Creates a sequential matrix in dense format using CUDA. Collective Input Parameters: + comm - MPI communicator . m - number of rows . n - number of columns - data - optional location of GPU matrix data. Set data=NULL for PETSc to control matrix memory allocation. Output Parameter: . 
A - the matrix Notes: Level: intermediate .seealso: MatCreate(), MatCreateSeqDense() @*/ PetscErrorCode MatCreateSeqDenseCUDA(MPI_Comm comm,PetscInt m,PetscInt n,PetscScalar *data,Mat *A) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr); if (size > 1) SETERRQ1(comm,PETSC_ERR_ARG_WRONG,"Invalid communicator size %d",size); ierr = MatCreate(comm,A);CHKERRQ(ierr); ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr); ierr = MatSetType(*A,MATSEQDENSECUDA);CHKERRQ(ierr); ierr = MatSeqDenseCUDASetPreallocation(*A,data);CHKERRQ(ierr); PetscFunctionReturn(0); } /*MC MATSEQDENSECUDA - MATSEQDENSECUDA = "seqdensecuda" - A matrix type to be used for sequential dense matrices on GPUs. Options Database Keys: . -mat_type seqdensecuda - sets the matrix type to "seqdensecuda" during a call to MatSetFromOptions() Level: beginner M*/ PETSC_EXTERN PetscErrorCode MatCreate_SeqDenseCUDA(Mat B) { PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); ierr = MatCreate_SeqDense(B);CHKERRQ(ierr); ierr = MatConvert_SeqDense_SeqDenseCUDA(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); PetscFunctionReturn(0); }
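/*
   Editorial usage sketch (not part of the upstream source): how the type and
   routines defined above are typically exercised from application code. It
   assumes a PETSc build configured with CUDA/HIP support; the function name
   below is hypothetical and the block is guarded out of compilation.
*/
#if 0
#include <petscmat.h>

static PetscErrorCode ExampleSeqDenseCUDAMult(void)
{
  Mat            A;
  Vec            x,b;
  PetscScalar    *v;
  PetscInt       i,j,m = 10,n = 10;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* m x n dense matrix; data = NULL lets PETSc allocate the GPU storage */
  ierr = MatCreateSeqDenseCUDA(PETSC_COMM_SELF,m,n,NULL,&A);CHKERRQ(ierr);
  /* fill on the host; the write accessor allocates CPU storage on demand, lda == m here */
  ierr = MatDenseGetArrayWrite(A,&v);CHKERRQ(ierr);
  for (j=0; j<n; j++) for (i=0; i<m; i++) v[i + j*m] = (i == j) ? 2.0 : 0.0;
  ierr = MatDenseRestoreArrayWrite(A,&v);CHKERRQ(ierr);
  ierr = MatCreateVecs(A,&x,&b);CHKERRQ(ierr);
  ierr = VecSet(x,1.0);CHKERRQ(ierr);
  ierr = MatMult(A,x,b);CHKERRQ(ierr); /* dispatches to MatMult_SeqDenseCUDA; data is mirrored to the GPU on first use */
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = VecDestroy(&b);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif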
224d36cb2d71c10dfd07128261c1f23ff41a6800.cu
/* Defines the matrix operations for sequential dense with CUDA */ #include <petscpkg_version.h> #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <../src/mat/impls/dense/seq/dense.h> /*I "petscmat.h" I*/ #include <petsccublas.h> /* cublas definitions are here */ #include <petsc/private/cudavecimpl.h> #if defined(PETSC_USE_COMPLEX) #if defined(PETSC_USE_REAL_SINGLE) #define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnCpotrf((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h)) #define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnCpotrf_bufferSize((a),(b),(c),(cuComplex*)(d),(e),(f)) #define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnCpotrs((a),(b),(c),(d),(cuComplex*)(e),(f),(cuComplex*)(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnCpotri((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnCpotri_bufferSize((a),(b),(c),(cuComplex*)(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnCsytrf((a),(b),(c),(cuComplex*)(d),(e),(f),(cuComplex*)(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) cusolverDnCsytrf_bufferSize((a),(b),(cuComplex*)(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnCgetrf((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnCgetrf_bufferSize((a),(b),(c),(cuComplex*)(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnCgetrs((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(cuComplex*)(h),(i),(j)) #else /* complex double */ #define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnZpotrf((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h)) #define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnZpotrf_bufferSize((a),(b),(c),(cuDoubleComplex*)(d),(e),(f)) #define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnZpotrs((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(cuDoubleComplex*)(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnZpotri((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnZpotri_bufferSize((a),(b),(c),(cuDoubleComplex*)(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnZsytrf((a),(b),(c),(cuDoubleComplex*)(d),(e),(f),(cuDoubleComplex*)(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) cusolverDnZsytrf_bufferSize((a),(b),(cuDoubleComplex*)(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnZgetrf((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnZgetrf_bufferSize((a),(b),(c),(cuDoubleComplex*)(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnZgetrs((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(cuDoubleComplex*)(h),(i),(j)) #endif #else /* real single */ #if defined(PETSC_USE_REAL_SINGLE) #define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnSpotrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnSpotrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnSpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnSpotri((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnSpotri_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnSsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) 
cusolverDnSsytrf_bufferSize((a),(b),(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnSgetrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnSgetrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnSgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j)) #else /* real double */ #define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnDpotrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnDpotrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnDpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnDpotri((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnDpotri_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnDsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) cusolverDnDsytrf_bufferSize((a),(b),(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnDgetrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnDgetrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnDgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j)) #endif #endif typedef struct { PetscScalar *d_v; /* pointer to the matrix on the GPU */ PetscBool user_alloc; PetscScalar *unplacedarray; /* if one called MatCUDADensePlaceArray(), this is where it stashed the original */ PetscBool unplaced_user_alloc; /* factorization support */ int *d_fact_ipiv; /* device pivots */ PetscScalar *d_fact_work; /* device workspace */ int fact_lwork; int *d_fact_info; /* device info */ /* workspace */ Vec workvec; } Mat_SeqDenseCUDA; PetscErrorCode MatSeqDenseCUDASetPreallocation(Mat A, PetscScalar *d_data) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscBool iscuda; cudaError_t cerr; PetscFunctionBegin; ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) PetscFunctionReturn(0); /* it may happen CPU preallocation has not been performed */ ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr); if (cA->lda <= 0) cA->lda = A->rmap->n; if (!dA->user_alloc) { cerr = cudaFree(dA->d_v);CHKERRCUDA(cerr); } if (!d_data) { /* petsc-allocated storage */ ierr = PetscIntMultError(cA->lda,A->cmap->n,NULL);CHKERRQ(ierr); cerr = cudaMalloc((void**)&dA->d_v,cA->lda*A->cmap->n*sizeof(PetscScalar));CHKERRCUDA(cerr); dA->user_alloc = PETSC_FALSE; } else { /* user-allocated storage */ dA->d_v = d_data; dA->user_alloc = PETSC_TRUE; A->offloadmask = PETSC_OFFLOAD_GPU; } A->preallocated = PETSC_TRUE; A->assembled = PETSC_TRUE; PetscFunctionReturn(0); } PetscErrorCode MatSeqDenseCUDACopyFromGPU(Mat A) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; cudaError_t cerr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); ierr = PetscInfo3(A,"%s matrix %d x %d\n",A->offloadmask == PETSC_OFFLOAD_GPU ? "Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr); if (A->offloadmask == PETSC_OFFLOAD_GPU) { if (!cA->v) { /* MatCreateSeqDenseCUDA may not allocate CPU memory. 
Allocate if needed */ ierr = MatSeqDenseSetPreallocation(A,NULL);CHKERRQ(ierr); } ierr = PetscLogEventBegin(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr); if (cA->lda > A->rmap->n) { PetscInt j,m = A->rmap->n; for (j=0; j<A->cmap->n; j++) { /* TODO: it can be done better */ cerr = cudaMemcpy(cA->v + j*cA->lda,dA->d_v + j*cA->lda,m*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } } else { cerr = cudaMemcpy(cA->v,dA->d_v,cA->lda*sizeof(PetscScalar)*A->cmap->n,cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } ierr = PetscLogGpuToCpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } PetscErrorCode MatSeqDenseCUDACopyToGPU(Mat A) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscBool copy; PetscErrorCode ierr; cudaError_t cerr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); if (A->boundtocpu) PetscFunctionReturn(0); copy = (PetscBool)(A->offloadmask == PETSC_OFFLOAD_CPU || A->offloadmask == PETSC_OFFLOAD_UNALLOCATED); ierr = PetscInfo3(A,"%s matrix %d x %d\n",copy ? "Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr); if (copy) { if (!dA->d_v) { /* Allocate GPU memory if not present */ ierr = MatSeqDenseCUDASetPreallocation(A,NULL);CHKERRQ(ierr); } ierr = PetscLogEventBegin(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr); if (cA->lda > A->rmap->n) { PetscInt j,m = A->rmap->n; for (j=0; j<A->cmap->n; j++) { /* TODO: it can be done better */ cerr = cudaMemcpy(dA->d_v + j*cA->lda,cA->v + j*cA->lda,m*sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); } } else { cerr = cudaMemcpy(dA->d_v,cA->v,cA->lda*sizeof(PetscScalar)*A->cmap->n,cudaMemcpyHostToDevice);CHKERRCUDA(cerr); } ierr = PetscLogCpuToGpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } static PetscErrorCode MatCopy_SeqDenseCUDA(Mat A,Mat B,MatStructure str) { Mat_SeqDense *a = (Mat_SeqDense*)A->data,*b = (Mat_SeqDense*)B->data; PetscErrorCode ierr; const PetscScalar *va; PetscScalar *vb; PetscInt lda1=a->lda,lda2=b->lda, m=A->rmap->n,n=A->cmap->n, j; cudaError_t cerr; PetscFunctionBegin; /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. 
*/ if (A->ops->copy != B->ops->copy) { ierr = MatCopy_Basic(A,B,str);CHKERRQ(ierr); PetscFunctionReturn(0); } if (m != B->rmap->n || n != B->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"size(B) != size(A)"); ierr = MatDenseCUDAGetArrayRead(A,&va);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayWrite(B,&vb);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (lda1>m || lda2>m) { for (j=0; j<n; j++) { cerr = cudaMemcpy(vb+j*lda2,va+j*lda1,m*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(cerr); } } else { cerr = cudaMemcpy(vb,va,m*(n*sizeof(PetscScalar)),cudaMemcpyDeviceToDevice);CHKERRCUDA(cerr); } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(B,&vb);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&va);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAPlaceArray_SeqDenseCUDA(Mat A, const PetscScalar *a) { Mat_SeqDense *aa = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (aa->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (aa->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first"); if (aa->v) { ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); } dA->unplacedarray = dA->d_v; dA->unplaced_user_alloc = dA->user_alloc; dA->d_v = (PetscScalar*)a; dA->user_alloc = PETSC_TRUE; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAResetArray_SeqDenseCUDA(Mat A) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (a->v) { ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); } dA->d_v = dA->unplacedarray; dA->user_alloc = dA->unplaced_user_alloc; dA->unplacedarray = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAReplaceArray_SeqDenseCUDA(Mat A, const PetscScalar *a) { Mat_SeqDense *aa = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; cudaError_t cerr; PetscFunctionBegin; if (aa->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (aa->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first"); if (!dA->user_alloc) { cerr = cudaFree(dA->d_v);CHKERRCUDA(cerr); } dA->d_v = (PetscScalar*)a; dA->user_alloc = PETSC_FALSE; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAGetArrayWrite_SeqDenseCUDA(Mat A, PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (!dA->d_v) { ierr = MatSeqDenseCUDASetPreallocation(A,NULL);CHKERRQ(ierr); } *a = dA->d_v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDARestoreArrayWrite_SeqDenseCUDA(Mat A, PetscScalar **a) { PetscFunctionBegin; *a = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAGetArrayRead_SeqDenseCUDA(Mat A, const PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; 
PetscFunctionBegin; ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); *a = dA->d_v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDARestoreArrayRead_SeqDenseCUDA(Mat A, const PetscScalar **a) { PetscFunctionBegin; *a = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAGetArray_SeqDenseCUDA(Mat A, PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); *a = dA->d_v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDARestoreArray_SeqDenseCUDA(Mat A, PetscScalar **a) { PetscFunctionBegin; *a = NULL; PetscFunctionReturn(0); } PETSC_EXTERN PetscErrorCode MatSeqDenseCUDAInvertFactors_Private(Mat A) { #if PETSC_PKG_CUDA_VERSION_GE(10,1,0) Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; PetscErrorCode ierr; cudaError_t ccer; cusolverStatus_t cerr; cusolverDnHandle_t handle; int n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_LU) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDngetri not implemented"); else if (A->factortype == MAT_FACTOR_CHOLESKY) { if (!dA->d_fact_ipiv) { /* spd */ int il; ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); cerr = cusolverDnXpotri_bufferSize(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,&il);CHKERRCUSOLVER(cerr); if (il > dA->fact_lwork) { dA->fact_lwork = il; ccer = cudaFree(dA->d_fact_work);CHKERRCUDA(ccer); ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXpotri(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ccer = WaitForCUDA();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); /* TODO (write cuda kernel) */ ierr = MatSeqDenseSymmetrize_Private(A,PETSC_TRUE);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytri not implemented"); } #if defined(PETSC_USE_DEBUG) ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: leading minor of order %d is zero",info); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr); A->ops->solve = NULL; A->ops->solvetranspose = NULL; A->ops->matsolve = NULL; A->factortype = MAT_FACTOR_NONE; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); #else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Upgrade to CUDA version 10.1.0 or higher"); #endif } static PetscErrorCode MatMatSolve_SeqDenseCUDA(Mat A,Mat B,Mat X) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDense *x = (Mat_SeqDense*)X->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; const PetscScalar *da; PetscScalar *dx; cusolverDnHandle_t handle; PetscBool iscuda; int nrhs,n,lda,ldx; #if defined(PETSC_USE_DEBUG) int info; #endif cudaError_t ccer; cusolverStatus_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be 
factored to solve"); if (!dA->d_fact_work) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); ierr = PetscObjectTypeCompareAny((PetscObject)X,&iscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr); if (X != B) { ierr = MatCopy(B,X,SAME_NONZERO_PATTERN);CHKERRQ(ierr); } ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); /* MatMatSolve does not have a dispatching mechanism, we may end up with a MATSEQDENSE here */ ierr = PetscObjectTypeCompare((PetscObject)X,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) { ierr = MatConvert(X,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&X);CHKERRQ(ierr); } ierr = MatDenseCUDAGetArray(X,&dx);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(X->cmap->n,&nrhs);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscMPIIntCast(x->lda,&ldx);CHKERRQ(ierr); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_LU) { ierr = PetscInfo2(A,"LU solve %d x %d on backend\n",n,n);CHKERRQ(ierr); cerr = cusolverDnXgetrs(handle,CUBLAS_OP_N,n,nrhs,da,lda,dA->d_fact_ipiv,dx,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else if (A->factortype == MAT_FACTOR_CHOLESKY) { ierr = PetscInfo2(A,"Cholesky solve %d x %d on backend\n",n,n);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { /* spd */ /* ========= Program hit cudaErrorNotReady (error 34) due to "device not ready" on CUDA API call to cudaEventQuery. */ cerr = cusolverDnXpotrs(handle,CUBLAS_FILL_MODE_LOWER,n,nrhs,da,lda,dx,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytrs not implemented"); } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unknown factor type %d",A->factortype); ccer = WaitForCUDA();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(X,&dx);CHKERRQ(ierr); if (!iscuda) { ierr = MatConvert(X,MATSEQDENSE,MAT_INPLACE_MATRIX,&X);CHKERRQ(ierr); } #if defined(PETSC_USE_DEBUG) ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif ierr = PetscLogGpuFlops(nrhs*(2.0*n*n - n));CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_Private(Mat A,Vec xx,Vec yy,PetscBool trans) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; const PetscScalar *da; PetscScalar *y; cusolverDnHandle_t handle; int one = 1,n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cudaError_t ccer; cusolverStatus_t cerr; PetscBool iscuda; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); if (!dA->d_fact_work) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr); /* MatSolve does not have a dispatching mechanism, we may end up with a VECSTANDARD here */ ierr = PetscObjectTypeCompareAny((PetscObject)yy,&iscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr); if (iscuda) { ierr = VecCopy(xx,yy);CHKERRQ(ierr); ierr = VecCUDAGetArray(yy,&y);CHKERRQ(ierr); } else { if (!dA->workvec) { ierr = 
MatCreateVecs(A,&dA->workvec,NULL);CHKERRQ(ierr); } ierr = VecCopy(xx,dA->workvec);CHKERRQ(ierr); ierr = VecCUDAGetArray(dA->workvec,&y);CHKERRQ(ierr); } ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_LU) { ierr = PetscInfo2(A,"LU solve %d x %d on backend\n",n,n);CHKERRQ(ierr); cerr = cusolverDnXgetrs(handle,trans ? CUBLAS_OP_T : CUBLAS_OP_N,n,one,da,lda,dA->d_fact_ipiv,y,n,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else if (A->factortype == MAT_FACTOR_CHOLESKY) { ierr = PetscInfo2(A,"Cholesky solve %d x %d on backend\n",n,n);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { /* spd */ /* ========= Program hit cudaErrorNotReady (error 34) due to "device not ready" on CUDA API call to cudaEventQuery. */ cerr = cusolverDnXpotrs(handle,CUBLAS_FILL_MODE_LOWER,n,one,da,lda,y,n,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytrs not implemented"); } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unknown factor type %d",A->factortype); ccer = WaitForCUDA();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); if (iscuda) { ierr = VecCUDARestoreArray(yy,&y);CHKERRQ(ierr); } else { ierr = VecCUDARestoreArray(dA->workvec,&y);CHKERRQ(ierr); ierr = VecCopy(dA->workvec,yy);CHKERRQ(ierr); } ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif ierr = PetscLogGpuFlops(2.0*n*n - n);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Private(A,xx,yy,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolveTranspose_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Private(A,xx,yy,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatLUFactor_SeqDenseCUDA(Mat A,IS rperm,IS cperm,const MatFactorInfo *factinfo) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; int m,n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cusolverStatus_t cerr; cusolverDnHandle_t handle; cudaError_t ccer; PetscErrorCode ierr; PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscInfo2(A,"LU factor %d x %d on backend\n",m,n);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { ccer = cudaMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer); } if (!dA->fact_lwork) { cerr = cusolverDnXgetrf_bufferSize(handle,m,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = cudaMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } 
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXgetrf(handle,m,n,da,lda,dA->d_fact_work,dA->d_fact_ipiv,dA->d_fact_info);CHKERRCUSOLVER(cerr); ccer = WaitForCUDA();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif A->factortype = MAT_FACTOR_LU; ierr = PetscLogGpuFlops(2.0*n*n*m/3.0);CHKERRQ(ierr); A->ops->solve = MatSolve_SeqDenseCUDA; A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA; A->ops->matsolve = MatMatSolve_SeqDenseCUDA; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatCholeskyFactor_SeqDenseCUDA(Mat A,IS perm,const MatFactorInfo *factinfo) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; int n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cusolverStatus_t cerr; cusolverDnHandle_t handle; cudaError_t ccer; PetscErrorCode ierr; PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr); ierr = PetscInfo2(A,"Cholesky factor %d x %d on backend\n",n,n);CHKERRQ(ierr); if (A->spd) { ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); if (!dA->fact_lwork) { cerr = cusolverDnXpotrf_bufferSize(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = cudaMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXpotrf(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ccer = WaitForCUDA();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif A->factortype = MAT_FACTOR_CHOLESKY; ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cusolverDnsytrs unavailable. 
Use MAT_FACTOR_LU"); #if 0 /* at the time of writing this interface (cuda 10.0), cusolverDn does not implement *sytrs and *hetr* routines The code below should work, and it can be activated when *sytrs routines will be available */ if (!dA->d_fact_ipiv) { ccer = cudaMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer); } if (!dA->fact_lwork) { cerr = cusolverDnXsytrf_bufferSize(handle,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = cudaMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXsytrf(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_ipiv,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); #endif A->ops->solve = MatSolve_SeqDenseCUDA; A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA; A->ops->matsolve = MatMatSolve_SeqDenseCUDA; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } /* GEMM kernel: C = op(A)*op(B), tA, tB flag transposition */ PETSC_INTERN PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat A,Mat B,Mat C,PetscBool tA,PetscBool tB) { const PetscScalar *da,*db; PetscScalar *dc; PetscScalar one=1.0,zero=0.0; int m,n,k; PetscInt alda,blda,clda; PetscErrorCode ierr; cublasHandle_t cublasv2handle; PetscBool Aiscuda,Biscuda; cublasStatus_t berr; cudaError_t cerr; PetscFunctionBegin; /* we may end up with SEQDENSE as one of the arguments */ ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&Aiscuda);CHKERRQ(ierr); ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&Biscuda);CHKERRQ(ierr); if (!Aiscuda) { ierr = MatConvert(A,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } if (!Biscuda) { ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); } ierr = PetscMPIIntCast(C->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(C->cmap->n,&n);CHKERRQ(ierr); if (tA) { ierr = PetscMPIIntCast(A->rmap->n,&k);CHKERRQ(ierr); } else { ierr = PetscMPIIntCast(A->cmap->n,&k);CHKERRQ(ierr); } if (!m || !n || !k) PetscFunctionReturn(0); ierr = PetscInfo3(C,"Matrix-Matrix product %d x %d x %d on backend\n",m,k,n);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(B,&db);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayWrite(C,&dc);CHKERRQ(ierr); ierr = MatDenseGetLDA(A,&alda);CHKERRQ(ierr); ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr); ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); berr = cublasXgemm(cublasv2handle,tA ? CUBLAS_OP_T : CUBLAS_OP_N,tB ? 
CUBLAS_OP_T : CUBLAS_OP_N, m,n,k,&one,da,alda,db,blda,&zero,dc,clda);CHKERRCUBLAS(berr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(1.0*m*n*k + 1.0*m*n*(k-1));CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(B,&db);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(C,&dc);CHKERRQ(ierr); if (!Aiscuda) { ierr = MatConvert(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } if (!Biscuda) { ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatProductSetFromOptions_SeqDenseCUDA(Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatProductSetFromOptions_SeqDense(C);CHKERRQ(ierr); PetscFunctionReturn(0); } /* zz = op(A)*xx + yy if yy == NULL, only MatMult */ static PetscErrorCode MatMultAdd_SeqDenseCUDA_Private(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; const PetscScalar *xarray,*da; PetscScalar *zarray; PetscScalar one=1.0,zero=0.0; int m, n, lda; /* Use PetscMPIInt as it is typedef'ed to int */ cublasHandle_t cublasv2handle; cublasStatus_t berr; PetscErrorCode ierr; PetscFunctionBegin; if (yy && yy != zz) { /* mult add */ ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); } if (!A->rmap->n || !A->cmap->n) { if (!yy) { /* mult only */ ierr = VecSet_SeqCUDA(zz,0.0);CHKERRQ(ierr); } PetscFunctionReturn(0); } ierr = PetscInfo2(A,"Matrix-vector product %d x %d on backend\n",A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(mat->lda,&lda);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); berr = cublasXgemv(cublasv2handle,trans ? CUBLAS_OP_T : CUBLAS_OP_N, m,n,&one,da,lda,xarray,1,(yy ? &one : &zero),zarray,1);CHKERRCUBLAS(berr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*A->rmap->n*A->cmap->n - (yy ? 
0 : A->rmap->n));CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTransposeAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMult_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTranspose_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetArrayRead_SeqDenseCUDA(Mat A,const PetscScalar **array) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); *array = mat->v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetArrayWrite_SeqDenseCUDA(Mat A,PetscScalar **array) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!mat->v) { /* MatCreateSeqDenseCUDA may not allocate CPU memory. Allocate if needed */ ierr = MatSeqDenseSetPreallocation(A,NULL);CHKERRQ(ierr); } *array = mat->v; A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetArray_SeqDenseCUDA(Mat A,PetscScalar **array) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); *array = mat->v; A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } PetscErrorCode MatScale_SeqDenseCUDA(Mat Y,PetscScalar alpha) { Mat_SeqDense *y = (Mat_SeqDense*)Y->data; PetscScalar *dy; int j,N,m,lday,one = 1; cublasHandle_t cublasv2handle; cublasStatus_t berr; PetscErrorCode ierr; cudaError_t cerr; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArray(Y,&dy);CHKERRQ(ierr); ierr = PetscMPIIntCast(Y->rmap->n*Y->cmap->n,&N);CHKERRQ(ierr); ierr = PetscMPIIntCast(Y->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(y->lda,&lday);CHKERRQ(ierr); ierr = PetscInfo2(Y,"Performing Scale %d x %d on backend\n",Y->rmap->n,Y->cmap->n);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (lday>m) { for (j=0; j<Y->cmap->n; j++) { berr = cublasXscal(cublasv2handle,m,&alpha,dy+lday*j,one);CHKERRCUBLAS(berr); } } else { berr = cublasXscal(cublasv2handle,N,&alpha,dy,one);CHKERRCUBLAS(berr); } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(N);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(Y,&dy);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatAXPY_SeqDenseCUDA(Mat Y,PetscScalar alpha,Mat X,MatStructure str) { Mat_SeqDense *x = (Mat_SeqDense*)X->data; Mat_SeqDense *y = (Mat_SeqDense*)Y->data; const PetscScalar *dx; PetscScalar *dy; int j,N,m,ldax,lday,one = 1; cublasHandle_t cublasv2handle; cublasStatus_t berr; PetscErrorCode ierr; cudaError_t cerr; PetscFunctionBegin; if (!X->rmap->n || !X->cmap->n) PetscFunctionReturn(0); ierr 
= PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(X,&dx);CHKERRQ(ierr); if (alpha != 0.0) { ierr = MatDenseCUDAGetArray(Y,&dy);CHKERRQ(ierr); } else { ierr = MatDenseCUDAGetArrayWrite(Y,&dy);CHKERRQ(ierr); } ierr = PetscMPIIntCast(X->rmap->n*X->cmap->n,&N);CHKERRQ(ierr); ierr = PetscMPIIntCast(X->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(x->lda,&ldax);CHKERRQ(ierr); ierr = PetscMPIIntCast(y->lda,&lday);CHKERRQ(ierr); ierr = PetscInfo2(Y,"Performing AXPY %d x %d on backend\n",Y->rmap->n,Y->cmap->n);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (ldax>m || lday>m) { for (j=0; j<X->cmap->n; j++) { berr = cublasXaxpy(cublasv2handle,m,&alpha,dx+j*ldax,one,dy+j*lday,one);CHKERRCUBLAS(berr); } } else { berr = cublasXaxpy(cublasv2handle,N,&alpha,dx,one,dy,one);CHKERRCUBLAS(berr); } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(PetscMax(2.*N-1,0));CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(X,&dx);CHKERRQ(ierr); if (alpha != 0.0) { ierr = MatDenseCUDARestoreArray(Y,&dy);CHKERRQ(ierr); } else { ierr = MatDenseCUDARestoreArrayWrite(Y,&dy);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatReset_SeqDenseCUDA(Mat A) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; cudaError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (dA) { if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first"); if (!dA->user_alloc) { cerr = cudaFree(dA->d_v);CHKERRCUDA(cerr); } cerr = cudaFree(dA->d_fact_ipiv);CHKERRCUDA(cerr); cerr = cudaFree(dA->d_fact_info);CHKERRCUDA(cerr); cerr = cudaFree(dA->d_fact_work);CHKERRCUDA(cerr); ierr = VecDestroy(&dA->workvec);CHKERRQ(ierr); } ierr = PetscFree(A->spptr);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatDestroy_SeqDenseCUDA(Mat A) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; /* prevent to copy back data if we own the data pointer */ if (!a->user_alloc) { A->offloadmask = PETSC_OFFLOAD_CPU; } ierr = MatConvert_SeqDenseCUDA_SeqDense(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); ierr = MatDestroy_SeqDense(A);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatDuplicate_SeqDenseCUDA(Mat A,MatDuplicateOption cpvalues,Mat *B) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); ierr = MatSetSizes(*B,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = MatSetType(*B,((PetscObject)A)->type_name);CHKERRQ(ierr); ierr = MatDuplicateNoCreate_SeqDense(*B,A,cpvalues);CHKERRQ(ierr); if (cpvalues == MAT_COPY_VALUES && A->offloadmask != PETSC_OFFLOAD_CPU) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; const PetscScalar *da; PetscScalar *db; cudaError_t cerr; PetscInt ldb; ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayWrite(*B,&db);CHKERRQ(ierr); ierr = MatDenseGetLDA(*B,&ldb);CHKERRQ(ierr); if (a->lda > A->rmap->n || ldb > A->rmap->n) { PetscInt j,m = A->rmap->n; for (j=0; j<A->cmap->n; j++) { /* it can be done better */ cerr = cudaMemcpy(db+j*ldb,da+j*a->lda,m*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(cerr); } } else { cerr = cudaMemcpy(db,da,(sizeof(PetscScalar)*A->cmap->n)*A->rmap->n,cudaMemcpyDeviceToDevice);CHKERRCUDA(cerr); } ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(*B,&db);CHKERRQ(ierr); (*B)->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } 
#include <petsc/private/vecimpl.h> static PetscErrorCode MatGetColumnVector_SeqDenseCUDA(Mat A,Vec v,PetscInt col) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscScalar *x; PetscBool viscuda; cudaError_t cerr; PetscFunctionBegin; ierr = PetscObjectTypeCompareAny((PetscObject)v,&viscuda,VECSEQCUDA,VECMPICUDA,VECCUDA,"");CHKERRQ(ierr); if (viscuda && !v->boundtocpu) { /* update device data */ ierr = VecCUDAGetArrayWrite(v,&x);CHKERRQ(ierr); if (A->offloadmask & PETSC_OFFLOAD_GPU) { cerr = cudaMemcpy(x,dA->d_v + col*a->lda,A->rmap->n*sizeof(PetscScalar),cudaMemcpyHostToHost);CHKERRCUDA(cerr); } else { cerr = cudaMemcpy(x,a->v + col*a->lda,A->rmap->n*sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); } ierr = VecCUDARestoreArrayWrite(v,&x);CHKERRQ(ierr); } else { /* update host data */ ierr = VecGetArrayWrite(v,&x);CHKERRQ(ierr); if (A->offloadmask & PETSC_OFFLOAD_CPU) { ierr = PetscArraycpy(x,a->v+col*a->lda,A->rmap->n);CHKERRQ(ierr); } else { cerr = cudaMemcpy(x,dA->d_v + col*a->lda,A->rmap->n*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } ierr = VecRestoreArrayWrite(v,&x);CHKERRQ(ierr); } PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatGetFactor_seqdense_cuda(Mat A,MatFactorType ftype,Mat *fact) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),fact);CHKERRQ(ierr); ierr = MatSetSizes(*fact,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = MatSetType(*fact,MATSEQDENSECUDA);CHKERRQ(ierr); if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU) { (*fact)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqDense; } else { (*fact)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqDense; } (*fact)->factortype = ftype; ierr = PetscFree((*fact)->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&(*fact)->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_LU]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_ILU]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_CHOLESKY]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_ICC]);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetColumnVec_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); ierr = MatDenseCUDAGetArray(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); if (!a->cvec) { /* we pass the data of A, to prevent allocating needless GPU memory the first time VecCUDAPlaceArray is called */ ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,a->ptrinuse,&a->cvec);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr); } a->vecinuse = col + 1; ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr); *v = a->cvec; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreColumnVec_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->vecinuse) 
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first"); if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector"); a->vecinuse = 0; ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetColumnVecRead_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); ierr = MatDenseCUDAGetArrayRead(A,&a->ptrinuse);CHKERRQ(ierr); if (!a->cvec) { /* we pass the data of A, to prevent allocating needless GPU memory the first time VecCUDAPlaceArray is called */ ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,a->ptrinuse,&a->cvec);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr); } a->vecinuse = col + 1; ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr); ierr = VecLockReadPush(a->cvec);CHKERRQ(ierr); *v = a->cvec; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreColumnVecRead_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first"); if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector"); a->vecinuse = 0; ierr = VecLockReadPop(a->cvec);CHKERRQ(ierr); ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&a->ptrinuse);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetColumnVecWrite_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); ierr = MatDenseCUDAGetArrayWrite(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); if (!a->cvec) { /* we pass the data of A, to prevent allocating needless GPU memory the first time VecCUDAPlaceArray is called */ ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,a->ptrinuse,&a->cvec);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr); } a->vecinuse = col + 1; ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr); *v = a->cvec; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreColumnVecWrite_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first"); if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector"); a->vecinuse = 0; ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetSubMatrix_SeqDenseCUDA(Mat A,PetscInt cbegin,PetscInt cend,Mat *v) { Mat_SeqDense 
*a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (a->cmat && cend-cbegin != a->cmat->cmap->N) { ierr = MatDestroy(&a->cmat);CHKERRQ(ierr); } ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); if (!a->cmat) { ierr = MatCreateDenseCUDA(PetscObjectComm((PetscObject)A),A->rmap->n,PETSC_DECIDE,A->rmap->N,cend-cbegin,dA->d_v + (size_t)cbegin * (size_t)a->lda,&a->cmat);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cmat);CHKERRQ(ierr); } else { ierr = MatDenseCUDAPlaceArray(a->cmat,dA->d_v + (size_t)cbegin * (size_t)a->lda);CHKERRQ(ierr); } ierr = MatDenseSetLDA(a->cmat,a->lda);CHKERRQ(ierr); if (a->v) { ierr = MatDensePlaceArray(a->cmat,a->v + (size_t)cbegin * (size_t)a->lda);CHKERRQ(ierr); } a->cmat->offloadmask = A->offloadmask; a->matinuse = cbegin + 1; *v = a->cmat; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreSubMatrix_SeqDenseCUDA(Mat A,Mat *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetSubMatrix() first"); if (!a->cmat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column matrix"); if (*v != a->cmat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Not the matrix obtained from MatDenseGetSubMatrix()"); a->matinuse = 0; A->offloadmask = PETSC_OFFLOAD_GPU; ierr = MatDenseCUDAResetArray(a->cmat);CHKERRQ(ierr); ierr = MatDenseResetArray(a->cmat);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseSetLDA_SeqDenseCUDA(Mat A,PetscInt lda) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscBool data; PetscFunctionBegin; data = (PetscBool)((A->rmap->n > 0 && A->cmap->n > 0) ? (dA->d_v ? 
PETSC_TRUE : PETSC_FALSE) : PETSC_FALSE); if (!dA->user_alloc && data && cA->lda!=lda) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"LDA cannot be changed after allocation of internal storage"); if (lda < A->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"LDA %D must be at least matrix dimension %D",lda,A->rmap->n); cA->lda = lda; PetscFunctionReturn(0); } static PetscErrorCode MatBindToCPU_SeqDenseCUDA(Mat A,PetscBool flg) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); A->boundtocpu = flg; if (!flg) { PetscBool iscuda; ierr = PetscObjectTypeCompare((PetscObject)a->cvec,VECSEQCUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) { ierr = VecDestroy(&a->cvec);CHKERRQ(ierr); } ierr = PetscObjectTypeCompare((PetscObject)a->cmat,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) { ierr = MatDestroy(&a->cmat);CHKERRQ(ierr); } ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C",MatDenseGetArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C",MatDenseGetArrayRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayWrite_C",MatDenseGetArrayWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetSubMatrix_C",MatDenseGetSubMatrix_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreSubMatrix_C",MatDenseRestoreSubMatrix_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseSetLDA_C",MatDenseSetLDA_SeqDenseCUDA);CHKERRQ(ierr); A->ops->duplicate = MatDuplicate_SeqDenseCUDA; A->ops->mult = MatMult_SeqDenseCUDA; A->ops->multadd = MatMultAdd_SeqDenseCUDA; A->ops->multtranspose = MatMultTranspose_SeqDenseCUDA; A->ops->multtransposeadd = MatMultTransposeAdd_SeqDenseCUDA; A->ops->matmultnumeric = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->axpy = MatAXPY_SeqDenseCUDA; A->ops->choleskyfactor = MatCholeskyFactor_SeqDenseCUDA; A->ops->lufactor = MatLUFactor_SeqDenseCUDA; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDenseCUDA; A->ops->getcolumnvector = MatGetColumnVector_SeqDenseCUDA; A->ops->scale = MatScale_SeqDenseCUDA; A->ops->copy = MatCopy_SeqDenseCUDA; } else { /* make sure we have an up-to-date copy on the CPU */ ierr = 
MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayWrite_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetSubMatrix_C",MatDenseGetSubMatrix_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreSubMatrix_C",MatDenseRestoreSubMatrix_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseSetLDA_C",MatDenseSetLDA_SeqDense);CHKERRQ(ierr); A->ops->duplicate = MatDuplicate_SeqDense; A->ops->mult = MatMult_SeqDense; A->ops->multadd = MatMultAdd_SeqDense; A->ops->multtranspose = MatMultTranspose_SeqDense; A->ops->multtransposeadd = MatMultTransposeAdd_SeqDense; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense; A->ops->matmultnumeric = MatMatMultNumeric_SeqDense_SeqDense; A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDense_SeqDense; A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDense_SeqDense; A->ops->axpy = MatAXPY_SeqDense; A->ops->choleskyfactor = MatCholeskyFactor_SeqDense; A->ops->lufactor = MatLUFactor_SeqDense; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense; A->ops->getcolumnvector = MatGetColumnVector_SeqDense; A->ops->scale = MatScale_SeqDense; A->ops->copy = MatCopy_SeqDense; } if (a->cmat) { ierr = MatBindToCPU(a->cmat,flg);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode MatConvert_SeqDenseCUDA_SeqDense(Mat M,MatType type,MatReuse reuse,Mat *newmat) { Mat B; PetscErrorCode ierr; PetscFunctionBegin; if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) { /* TODO these cases should be optimized */ ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr); PetscFunctionReturn(0); } B = *newmat; ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_TRUE);CHKERRQ(ierr); ierr = MatReset_SeqDenseCUDA(B);CHKERRQ(ierr); ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECSTANDARD,&B->defaultvectype);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayRead_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayWrite_C",NULL);CHKERRQ(ierr); ierr = 
PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayRead_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayWrite_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAPlaceArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAResetArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAReplaceArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_seqaij_seqdensecuda_C",NULL);CHKERRQ(ierr); B->ops->bindtocpu = NULL; B->ops->destroy = MatDestroy_SeqDense; B->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } PetscErrorCode MatConvert_SeqDense_SeqDenseCUDA(Mat M,MatType type,MatReuse reuse,Mat *newmat) { Mat_SeqDenseCUDA *dB; Mat B; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) { /* TODO these cases should be optimized */ ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr); PetscFunctionReturn(0); } B = *newmat; ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSECUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C", MatConvert_SeqDenseCUDA_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArray_C", MatDenseCUDAGetArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayRead_C", MatDenseCUDAGetArrayRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayWrite_C", MatDenseCUDAGetArrayWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArray_C", MatDenseCUDARestoreArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayRead_C", MatDenseCUDARestoreArrayRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayWrite_C", MatDenseCUDARestoreArrayWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAPlaceArray_C", MatDenseCUDAPlaceArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAResetArray_C", MatDenseCUDAResetArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAReplaceArray_C", MatDenseCUDAReplaceArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_seqaij_seqdensecuda_C",MatProductSetFromOptions_SeqAIJ_SeqDense);CHKERRQ(ierr); ierr = PetscNewLog(B,&dB);CHKERRQ(ierr); B->spptr = dB; B->offloadmask = PETSC_OFFLOAD_UNALLOCATED; ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_FALSE);CHKERRQ(ierr); B->ops->bindtocpu = MatBindToCPU_SeqDenseCUDA; B->ops->destroy = MatDestroy_SeqDenseCUDA; PetscFunctionReturn(0); } /*@C MatCreateSeqDenseCUDA - Creates a sequential matrix in dense format using CUDA. Collective Input Parameters: + comm - MPI communicator . m - number of rows . n - number of columns - data - optional location of GPU matrix data. Set data=NULL for PETSc to control matrix memory allocation. Output Parameter: . 
A - the matrix Notes: Level: intermediate .seealso: MatCreate(), MatCreateSeqDense() @*/ PetscErrorCode MatCreateSeqDenseCUDA(MPI_Comm comm,PetscInt m,PetscInt n,PetscScalar *data,Mat *A) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr); if (size > 1) SETERRQ1(comm,PETSC_ERR_ARG_WRONG,"Invalid communicator size %d",size); ierr = MatCreate(comm,A);CHKERRQ(ierr); ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr); ierr = MatSetType(*A,MATSEQDENSECUDA);CHKERRQ(ierr); ierr = MatSeqDenseCUDASetPreallocation(*A,data);CHKERRQ(ierr); PetscFunctionReturn(0); } /*MC MATSEQDENSECUDA - MATSEQDENSECUDA = "seqdensecuda" - A matrix type to be used for sequential dense matrices on GPUs. Options Database Keys: . -mat_type seqdensecuda - sets the matrix type to "seqdensecuda" during a call to MatSetFromOptions() Level: beginner M*/ PETSC_EXTERN PetscErrorCode MatCreate_SeqDenseCUDA(Mat B) { PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); ierr = MatCreate_SeqDense(B);CHKERRQ(ierr); ierr = MatConvert_SeqDense_SeqDenseCUDA(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); PetscFunctionReturn(0); }
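The man page just above documents MatCreateSeqDenseCUDA(comm, m, n, data, &A). As a minimal usage sketch (not part of the PETSc source above; the sizes and values are invented, and only standard PETSc calls such as MatSetValue, MatAssemblyBegin/End, MatCreateVecs and MatMult are assumed), a caller could build and apply a small dense GPU matrix like this:

/* Hedged usage sketch for MatCreateSeqDenseCUDA(): build a small dense GPU matrix,
   assemble it, and apply it to a vector. Sizes and entries are made up for illustration. */
#include <petscmat.h>

int main(int argc, char **argv)
{
  Mat            A;
  Vec            x, y;
  PetscInt       i, j, m = 4, n = 4;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, NULL); if (ierr) return ierr;
  /* data = NULL lets PETSc allocate the GPU storage itself */
  ierr = MatCreateSeqDenseCUDA(PETSC_COMM_SELF, m, n, NULL, &A); CHKERRQ(ierr);
  for (i = 0; i < m; i++) {
    for (j = 0; j < n; j++) {
      ierr = MatSetValue(A, i, j, (PetscScalar)(i + j), INSERT_VALUES); CHKERRQ(ierr);
    }
  }
  ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr);
  /* vectors created from A follow its default vector type */
  ierr = MatCreateVecs(A, &x, &y); CHKERRQ(ierr);
  ierr = VecSet(x, 1.0); CHKERRQ(ierr);
  ierr = MatMult(A, x, y); CHKERRQ(ierr);
  ierr = VecDestroy(&x); CHKERRQ(ierr);
  ierr = VecDestroy(&y); CHKERRQ(ierr);
  ierr = MatDestroy(&A); CHKERRQ(ierr);
  return PetscFinalize();
}

Because the conversion routine above sets VECCUDA as the matrix's default vector type, the vectors obtained from MatCreateVecs() live on the GPU and MatMult() dispatches to the cuBLAS-backed MatMult_SeqDenseCUDA defined earlier in this file.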
9deb4b03f1e1785f6f8d5d0e86fd2820131df52e.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <chrono>
#include <time.h>
#include <iostream>
#include <cfloat>
#include <algorithm> // not strictly needed; kept from the original

// HIP runtime API
#include <hip/hip_runtime.h>

// A plain "#define LENGTH 100" would also work; the original "#define int LENGTH 100" failed because of the stray "int"
int LENGTH = 1000; // length of the data set

// there is no native atomicMin for doubles, so emulate one with an atomicCAS loop on the bit pattern
__device__ double atomicMinf(double* address, double val){
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    while (val < __longlong_as_double(old)) {
        assumed = old;
        // if *address still equals "assumed", store val; either way read the current value back
        old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val));
    }
    return __longlong_as_double(old); // returns the value that was previously stored
}

// kernel for part one: grid-stride pass into shared memory, then a tree reduction per block
__global__ void findLeast(const double *array, double *m, const int size){
    extern __shared__ double share[]; // sized by the dynamic shared-memory launch argument: one double per thread
    int tid = threadIdx.x;
    int gid = blockDim.x * blockIdx.x + threadIdx.x; // which element to work on
    share[tid] = DBL_MAX; // initialize with the largest value so unused slots are neutral for min
    while(gid < size){ // grid-stride loop over the input
        share[tid] = fmin(share[tid], array[gid]); // was max(): we want the minimum
        gid += gridDim.x * blockDim.x;
    }
    __syncthreads();
    // reduce the per-thread minima of this block down into share[0]
    for(int i = blockDim.x / 2; i > 0; i /= 2){ // blockDim.x is assumed to be a power of two
        if(tid < i) share[tid] = fmin(share[tid], share[tid + i]);
        __syncthreads();
    }
    // compare across blocks to find the true minimum
    if(tid == 0) atomicMinf(m, share[0]); // atomicMinf returns the *old* value, so do not write it back to *m
}

///////////////////////////////////////////////////////////////////////////////

void fillArray(double *n, int s){
    srand(time(NULL)); // seed rand with the current time
    for(int i = 0; i < s; i++){
        n[i] = (double)rand();
    }
}

void printArray(double *n, int s){ // currently unused debugging helper
    for(int i = 0; i < s; i++){
        printf("%.5f ", n[i]);
    }
}

// CPU reference: scan the array for its smallest element
double checkMin(double* a, int size){
    double min = DBL_MAX;
    for(int i = 0; i < size; i++){
        if(a[i] < min) min = a[i];
    }
    return min;
}

int main(int argc, char **argv){
    size_t size = LENGTH * sizeof(double);
    double *h_a = (double*)malloc(size);              // host input array
    double *output = (double*)malloc(sizeof(double)); // host copy of the result
    hipError_t err = hipSuccess;
    if(h_a == NULL || output == NULL){
        fprintf(stderr, "Failed to allocate main memory");
        exit(EXIT_FAILURE);
    }

    // fill the array with random values, fill output with the largest double
    fillArray(h_a, LENGTH);
    *output = DBL_MAX;

    // allocate memory on the device for input vector a
    double *d_a = NULL;
    err = hipMalloc(&d_a, size);
    if (err != hipSuccess){
        fprintf(stderr, "Failed to allocate device vector a (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // allocate memory on the device for the output
    double *d_o = NULL;
    err = hipMalloc(&d_o, sizeof(double));
    if (err != hipSuccess){
        fprintf(stderr, "Failed to allocate output (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // copy host data into device memory
    err = hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
    if (err != hipSuccess){
        fprintf(stderr, "Failed to copy vector a from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = hipMemcpy(d_o, output, sizeof(double), hipMemcpyHostToDevice);
    if (err != hipSuccess){
        fprintf(stderr, "Failed to copy output from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // launch the kernel; the shared-memory argument sizes the extern __shared__ array
    int threadsPerBlock = 1024;
    int blocksPerGrid = (LENGTH + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(findLeast, dim3(blocksPerGrid), dim3(threadsPerBlock),
                       threadsPerBlock * sizeof(double), 0, d_a, d_o, LENGTH);
    err = hipGetLastError();
    if(err != hipSuccess){
        fprintf(stderr,"(error code %s)\nKernel launch failed\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // copy the result back; output is already a double*, so no & here
    err = hipMemcpy(output, d_o, sizeof(double), hipMemcpyDeviceToHost);
    if (err != hipSuccess){
        fprintf(stderr, "Failed to copy device d_o to host output (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // verify the GPU result against a simple CPU scan
    double expected = checkMin(h_a, LENGTH);
    printf("The minimum number: %f\n", *output);
    printf("CPU check:          %f (%s)\n", expected, (expected == *output) ? "match" : "MISMATCH");

    hipFree(d_a);
    hipFree(d_o);
    free(h_a);
    free(output);
    return 0;
}
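For readers comparing this hipified file with the original .cu that follows: both launches carry the same configuration, and the slot hipify maps the CUDA shared-memory parameter into is the one that sizes findLeast's extern __shared__ buffer. A side-by-side sketch of the corrected launch (same kernel and arguments as above):

// CUDA triple-chevron form: <<<grid, block, dynamic shared-memory bytes>>>
findLeast<<<blocksPerGrid, threadsPerBlock, threadsPerBlock * sizeof(double)>>>(d_a, d_o, LENGTH);
// HIP macro form: kernel, grid, block, shared-memory bytes, stream, then the kernel arguments
hipLaunchKernelGGL(findLeast, dim3(blocksPerGrid), dim3(threadsPerBlock),
                   threadsPerBlock * sizeof(double), 0, d_a, d_o, LENGTH);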
9deb4b03f1e1785f6f8d5d0e86fd2820131df52e.cu
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <chrono>
#include <time.h>
#include <iostream>
#include <cfloat>
#include <algorithm> // not strictly needed; kept from the original

// CUDA runtime API
#include <cuda_runtime.h>

// A plain "#define LENGTH 100" would also work; the original "#define int LENGTH 100" failed because of the stray "int"
int LENGTH = 1000; // length of the data set

// CUDA has no native atomicMin for doubles, so emulate one with an atomicCAS loop on the bit pattern
__device__ double atomicMinf(double* address, double val){
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    while (val < __longlong_as_double(old)) {
        assumed = old;
        // if *address still equals "assumed", store val; either way read the current value back
        old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val));
    }
    return __longlong_as_double(old); // returns the value that was previously stored
}

// kernel for part one: grid-stride pass into shared memory, then a tree reduction per block
__global__ void findLeast(const double *array, double *m, const int size){
    extern __shared__ double share[]; // sized by the dynamic shared-memory launch argument: one double per thread
    int tid = threadIdx.x;
    int gid = blockDim.x * blockIdx.x + threadIdx.x; // which element to work on
    share[tid] = DBL_MAX; // initialize with the largest value so unused slots are neutral for min
    while(gid < size){ // grid-stride loop over the input
        share[tid] = fmin(share[tid], array[gid]); // was max(): we want the minimum
        gid += gridDim.x * blockDim.x;
    }
    __syncthreads();
    // reduce the per-thread minima of this block down into share[0]
    for(int i = blockDim.x / 2; i > 0; i /= 2){ // blockDim.x is assumed to be a power of two
        if(tid < i) share[tid] = fmin(share[tid], share[tid + i]);
        __syncthreads();
    }
    // compare across blocks to find the true minimum
    if(tid == 0) atomicMinf(m, share[0]); // atomicMinf returns the *old* value, so do not write it back to *m
}

///////////////////////////////////////////////////////////////////////////////

void fillArray(double *n, int s){
    srand(time(NULL)); // seed rand with the current time
    for(int i = 0; i < s; i++){
        n[i] = (double)rand();
    }
}

void printArray(double *n, int s){ // currently unused debugging helper
    for(int i = 0; i < s; i++){
        printf("%.5f ", n[i]);
    }
}

// CPU reference: scan the array for its smallest element
double checkMin(double* a, int size){
    double min = DBL_MAX;
    for(int i = 0; i < size; i++){
        if(a[i] < min) min = a[i];
    }
    return min;
}

int main(int argc, char **argv){
    size_t size = LENGTH * sizeof(double);
    double *h_a = (double*)malloc(size);              // host input array
    double *output = (double*)malloc(sizeof(double)); // host copy of the result
    cudaError_t err = cudaSuccess;
    if(h_a == NULL || output == NULL){
        fprintf(stderr, "Failed to allocate main memory");
        exit(EXIT_FAILURE);
    }

    // fill the array with random values, fill output with the largest double
    fillArray(h_a, LENGTH);
    *output = DBL_MAX;

    // allocate memory on the device for input vector a
    double *d_a = NULL;
    err = cudaMalloc(&d_a, size);
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to allocate device vector a (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // allocate memory on the device for the output
    double *d_o = NULL;
    err = cudaMalloc(&d_o, sizeof(double));
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to allocate output (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // copy host data into device memory
    err = cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to copy vector a from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_o, output, sizeof(double), cudaMemcpyHostToDevice);
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to copy output from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // launch the kernel; the third launch parameter sizes the extern __shared__ array
    int threadsPerBlock = 1024;
    int blocksPerGrid = (LENGTH + threadsPerBlock - 1) / threadsPerBlock;
    findLeast<<<blocksPerGrid, threadsPerBlock, threadsPerBlock * sizeof(double)>>>(d_a, d_o, LENGTH);
    err = cudaGetLastError();
    if(err != cudaSuccess){
        fprintf(stderr,"(error code %s)\nKernel launch failed\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // copy the result back; output is already a double*, so no & here
    err = cudaMemcpy(output, d_o, sizeof(double), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to copy device d_o to host output (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // verify the GPU result against a simple CPU scan
    double expected = checkMin(h_a, LENGTH);
    printf("The minimum number: %f\n", *output);
    printf("CPU check:          %f (%s)\n", expected, (expected == *output) ? "match" : "MISMATCH");

    cudaFree(d_a);
    cudaFree(d_o);
    free(h_a);
    free(output);
    return 0;
}
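The kernel above hand-rolls a two-stage minimum reduction: a shared-memory tree per block followed by an atomic compare-and-swap across blocks. As a hedged cross-check that is not part of the original program, the same answer can be obtained with Thrust, which ships with the CUDA toolkit; the helper below assumes a device buffer filled the same way main() fills d_a:

#include <thrust/device_ptr.h>
#include <thrust/extrema.h>

// Wrap the raw device pointer and let thrust::min_element scan it on the GPU.
double thrustMin(const double *d_a, int length)
{
    thrust::device_ptr<const double> begin(d_a);
    thrust::device_ptr<const double> end = begin + length;
    // min_element returns an iterator to the smallest element; dereferencing copies it back to the host
    return *thrust::min_element(begin, end);
}

Comparing its return value with checkMin() and with the kernel's result is a quick way to validate all three paths against one another.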